[clang] d224be3 - [RISCV] Add the policy operand for some masked RVV ternary IR intrinsics.

Zakk Chen via cfe-commits cfe-commits at lists.llvm.org
Fri Feb 11 05:07:37 PST 2022


Author: Zakk Chen
Date: 2022-02-11T05:02:03-08:00
New Revision: d224be3b999afb7c4daa9c0ca807dea8123a7593

URL: https://github.com/llvm/llvm-project/commit/d224be3b999afb7c4daa9c0ca807dea8123a7593
DIFF: https://github.com/llvm/llvm-project/commit/d224be3b999afb7c4daa9c0ca807dea8123a7593.diff

LOG: [RISCV] Add the policy operand for some masked RVV ternary IR intrinsics.

Masked reduction intrinsics are special cases which don't need to have a policy
operand. The mask only affects which elements are read; it doesn't affect the
destination register.
The reduction intrinsics have a dedicated destination operand. If it
is undef, we use tail agnostic. If it is not undef, we use tail
undisturbed.

Co-Authored-by: Craig Topper <craig.topper at sifive.com>

Differential Revision: https://reviews.llvm.org/D117681
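
For orientation before the repetitive test updates below: a masked ternary
intrinsic call in C now lowers to the masked IR intrinsic with one extra i64
policy operand appended after VL. A minimal sketch, mirroring the updated
vfmacc checks (the wrapper name is hypothetical; reading the trailing "i64 0"
as a tail-undisturbed policy is an assumption inferred from the log above):

#include <riscv_vector.h>

// Masked fused multiply-accumulate: for active elements,
// acc[i] = op1[i] * op2[i] + acc[i].
vfloat32m1_t example_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
                                       vfloat32m1_t op1, vfloat32m1_t op2,
                                       size_t vl) {
  return vfmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
}
// Expected IR after this change (extra policy operand after VL; assumed
// meaning of 0: tail undisturbed):
//   %r = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(
//            <vscale x 2 x float> %acc, <vscale x 2 x float> %op1,
//            <vscale x 2 x float> %op2, <vscale x 2 x i1> %mask,
//            i64 %vl, i64 0)

Only the .mask. intrinsic calls gain the operand in the diff; the unmasked
forms are unchanged.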

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
    clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
    llvm/include/llvm/IR/IntrinsicsRISCV.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
    llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
    llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
    llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 6451e77e77f63..d02a4fffb1411 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -1700,7 +1700,6 @@ defm vwmulsu : RVVOutOp0Op1BuiltinSet<"vwmulsu", "csi",
 }
 
 // 12.13. Vector Single-Width Integer Multiply-Add Instructions
-let HasPolicy = false in {
 defm vmacc  : RVVIntTerBuiltinSet;
 defm vnmsac : RVVIntTerBuiltinSet;
 defm vmadd  : RVVIntTerBuiltinSet;
@@ -1721,7 +1720,6 @@ defm vwmaccsu : RVVOutOp1Op2BuiltinSet<"vwmaccsu", "csi",
 defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
                                        [["vx", "w", "wwUev"]]>;
 }
-}
 
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (op1, op2, mask, vl)
@@ -1804,7 +1802,6 @@ let Log2LMUL = [-2, -1, 0, 1, 2] in {
 }
 
 // 14.6. Vector Single-Width Floating-Point Fused Multiply-Add Instructions
-let HasPolicy = false in {
 defm vfmacc  : RVVFloatingTerBuiltinSet;
 defm vfnmacc : RVVFloatingTerBuiltinSet;
 defm vfmsac  : RVVFloatingTerBuiltinSet;
@@ -1819,7 +1816,6 @@ defm vfwmacc  : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmacc : RVVFloatingWidenTerBuiltinSet;
 defm vfwmsac  : RVVFloatingWidenTerBuiltinSet;
 defm vfwnmsac : RVVFloatingWidenTerBuiltinSet;
-}
 
 // 14.8. Vector Floating-Point Square-Root Instruction
 def vfsqrt : RVVFloatingUnaryVVBuiltin;
@@ -2003,12 +1999,10 @@ let HasMask = false, HasPolicy = false in {
 }
 
 // 17.3. Vector Slide Instructions
-let HasPolicy = false in {
 // 17.3.1. Vector Slideup Instructions
 defm vslideup   : RVVSlideBuiltinSet;
 // 17.3.2. Vector Slidedown Instructions
 defm vslidedown : RVVSlideBuiltinSet;
-}
 
 // 17.3.3. Vector Slide1up Instructions
 defm vslide1up : RVVSlideOneBuiltinSet;

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
index 4c44b69ca654a..963271d80ca16 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmacc.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -219,7 +219,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -240,7 +240,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -261,7 +261,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -271,7 +271,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -282,7 +282,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -292,7 +292,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -303,7 +303,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -313,7 +313,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -324,7 +324,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -334,7 +334,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -345,7 +345,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -355,7 +355,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -366,7 +366,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
index 1a81935f88490..09734276482dd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmadd.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -219,7 +219,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -240,7 +240,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -261,7 +261,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -271,7 +271,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -282,7 +282,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -292,7 +292,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -303,7 +303,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -313,7 +313,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -324,7 +324,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -334,7 +334,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -345,7 +345,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -355,7 +355,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -366,7 +366,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
index e42979c08f349..36308a35ef815 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsac.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -219,7 +219,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -240,7 +240,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -261,7 +261,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -271,7 +271,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -282,7 +282,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -292,7 +292,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -303,7 +303,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -313,7 +313,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -324,7 +324,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -334,7 +334,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -345,7 +345,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -355,7 +355,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -366,7 +366,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
index b3dace5e66bd4..abc60568c3557 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfmsub.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -208,7 +208,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -219,7 +219,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -229,7 +229,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -240,7 +240,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -250,7 +250,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -261,7 +261,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -271,7 +271,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -282,7 +282,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -292,7 +292,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -303,7 +303,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -313,7 +313,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -324,7 +324,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -334,7 +334,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -345,7 +345,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -355,7 +355,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -366,7 +366,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
index c4a534d11ad41..c76d6b88d0869 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmacc.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
index 42a257ad9c0d1..d1e5246cd2949 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmadd.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
index 8360fa5fa3006..24c2c9abbcb42 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsac.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
index b75872317f473..c7c85314b5699 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfnmsub.c
@@ -187,7 +187,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -198,7 +198,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
index 4e55316a3eeca..462f80cc4afb1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmacc.c
@@ -87,7 +87,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -98,7 +98,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -119,7 +119,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -129,7 +129,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -140,7 +140,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -161,7 +161,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
index c4086f06beb53..1a7c37a298449 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwmsac.c
@@ -87,7 +87,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -98,7 +98,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -119,7 +119,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -129,7 +129,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -140,7 +140,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -161,7 +161,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
index 4b5ccba44758f..9ac61004637ae 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmacc.c
@@ -87,7 +87,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -98,7 +98,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -119,7 +119,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -129,7 +129,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -140,7 +140,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -161,7 +161,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
index 16895d5f4b48d..f237873e74a92 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vfwnmsac.c
@@ -87,7 +87,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -98,7 +98,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -108,7 +108,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -119,7 +119,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -129,7 +129,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -140,7 +140,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -150,7 +150,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -161,7 +161,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
index b42dd449964f4..569301d120b9b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmacc.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

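For readers skimming the updated CHECK lines: the only change in each test is the trailing policy operand (i64 0) now appended to every masked IR intrinsic call. A minimal sketch of what one such test case exercises follows; it is not part of the test files. The function name example_vmadd_i8mf8_m and the use of the non-overloaded intrinsic are illustrative assumptions, and the expected IR in the comment is copied from the CHECK lines in this diff.

#include <riscv_vector.h>

// Sketch only: a masked vmadd call of this shape is what the updated tests
// cover. Per the CHECK lines, it is now expected to lower to the masked
// intrinsic with an extra trailing policy operand:
//   call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(
//       <vscale x 1 x i8> %acc, i8 %op1, <vscale x 1 x i8> %op2,
//       <vscale x 1 x i1> %mask, i64 %vl, i64 0)
vint8mf8_t example_vmadd_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1,
                                 vint8mf8_t op2, size_t vl) {
  return vmadd_vx_i8mf8_m(mask, acc, op1, op2, vl);
}
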
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
index 197fdce0d4f11..e6ece6f876b2a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vmadd.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

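For reference, a minimal C-level sketch of the call shape these updated CHECK lines correspond to. It mirrors the test_vmadd_vv_u16m1_m signature above; the overloaded vmadd() spelling from riscv_vector.h is assumed here and is not itself part of this diff:

#include <riscv_vector.h>

// Masked integer multiply-add: with this patch the call below is expected to
// lower to @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(..., i64 [[VL]], i64 0),
// i.e. the masked IR intrinsic now carries the trailing i64 policy operand
// shown in the CHECK lines above (the wrapper name is illustrative only).
vuint16m1_t madd_u16m1_masked(vbool16_t mask, vuint16m1_t acc,
                              vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vmadd(mask, acc, op1, op2, vl);  // overloaded masked form (assumed)
}
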
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
index 9c0683af889dc..211dcf4d636ba 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsac.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

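The change is uniform across these generated checks: every masked ternary intrinsic call picks up a trailing i64 policy operand, passed as 0 in these _m tests. As a reading aid only (not part of the patch), here is a minimal sketch of the updated intrinsic signature, inferred from the nxv2i32 CHECK-RV64 lines for the u32m1 masked variant above; the operand-role comments are annotations taken from the [[ACC]]/[[OP1]]/[[OP2]]/[[MASK]]/[[VL]] capture names, not text from the diff:

; Hedged sketch, not part of the diff: signature implied by the updated checks.
declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(
  <vscale x 2 x i32>,  ; acc
  <vscale x 2 x i32>,  ; op1
  <vscale x 2 x i32>,  ; op2
  <vscale x 2 x i1>,   ; mask
  i64,                 ; vl
  i64)                 ; policy operand added by this patch; these tests pass 0
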
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
index 9d3b6a0021b46..a27ef3451d600 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vnmsub.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
index c07a80a1bc776..ed45f08c94b04 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslidedown.c
@@ -537,7 +537,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
@@ -548,7 +548,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
@@ -559,7 +559,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
@@ -570,7 +570,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
@@ -580,7 +580,7 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
@@ -590,7 +590,7 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
@@ -600,7 +600,7 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
@@ -610,7 +610,7 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
@@ -621,7 +621,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
@@ -632,7 +632,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -643,7 +643,7 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
@@ -654,7 +654,7 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
@@ -665,7 +665,7 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
@@ -676,7 +676,7 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
@@ -687,7 +687,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -698,7 +698,7 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
@@ -709,7 +709,7 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
@@ -720,7 +720,7 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
@@ -731,7 +731,7 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -742,7 +742,7 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
@@ -753,7 +753,7 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
@@ -764,7 +764,7 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
@@ -775,7 +775,7 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
@@ -786,7 +786,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
@@ -797,7 +797,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
@@ -808,7 +808,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -818,7 +818,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
@@ -828,7 +828,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
@@ -838,7 +838,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
@@ -848,7 +848,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
@@ -859,7 +859,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
@@ -870,7 +870,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -881,7 +881,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
@@ -892,7 +892,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
@@ -903,7 +903,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
@@ -914,7 +914,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
@@ -925,7 +925,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -936,7 +936,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
@@ -947,7 +947,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
@@ -958,7 +958,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
@@ -969,7 +969,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -980,7 +980,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
@@ -991,7 +991,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
@@ -1002,7 +1002,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
@@ -1013,7 +1013,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
@@ -1024,7 +1024,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -1035,7 +1035,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
@@ -1046,7 +1046,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
@@ -1057,7 +1057,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
@@ -1068,7 +1068,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -1079,7 +1079,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
@@ -1090,7 +1090,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1101,7 +1101,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
index 415934c6e2e64..380296f0e4946 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vslideup.c
@@ -537,7 +537,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
@@ -547,7 +547,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
@@ -557,7 +557,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
@@ -567,7 +567,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
@@ -577,7 +577,7 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
@@ -587,7 +587,7 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
@@ -597,7 +597,7 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
@@ -607,7 +607,7 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
@@ -618,7 +618,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
@@ -629,7 +629,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -639,7 +639,7 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
@@ -649,7 +649,7 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
@@ -659,7 +659,7 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
@@ -669,7 +669,7 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
@@ -680,7 +680,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -690,7 +690,7 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
@@ -700,7 +700,7 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
@@ -710,7 +710,7 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
@@ -720,7 +720,7 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -730,7 +730,7 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
@@ -740,7 +740,7 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
@@ -750,7 +750,7 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
@@ -760,7 +760,7 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
@@ -771,7 +771,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
@@ -782,7 +782,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
@@ -793,7 +793,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -803,7 +803,7 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
@@ -813,7 +813,7 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
@@ -823,7 +823,7 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
@@ -833,7 +833,7 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
@@ -844,7 +844,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
@@ -855,7 +855,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -866,7 +866,7 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
@@ -877,7 +877,7 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
@@ -888,7 +888,7 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
@@ -899,7 +899,7 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
@@ -910,7 +910,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -921,7 +921,7 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
@@ -932,7 +932,7 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
@@ -943,7 +943,7 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
@@ -954,7 +954,7 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -965,7 +965,7 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
@@ -976,7 +976,7 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
@@ -987,7 +987,7 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
@@ -998,7 +998,7 @@ vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
@@ -1009,7 +1009,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -1020,7 +1020,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
@@ -1031,7 +1031,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
@@ -1042,7 +1042,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
@@ -1053,7 +1053,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -1064,7 +1064,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
@@ -1075,7 +1075,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1086,7 +1086,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
index 05edc0718edb3..916c14745e0d7 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-overloaded/vwmacc.c
@@ -1056,7 +1056,7 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1066,7 +1066,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
@@ -1076,7 +1076,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1086,7 +1086,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
@@ -1096,7 +1096,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
@@ -1106,7 +1106,7 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
@@ -1116,7 +1116,7 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
@@ -1126,7 +1126,7 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
@@ -1136,7 +1136,7 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
@@ -1146,7 +1146,7 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1156,7 +1156,7 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
@@ -1166,7 +1166,7 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
@@ -1176,7 +1176,7 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1187,7 +1187,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1197,7 +1197,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -1207,7 +1207,7 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -1217,7 +1217,7 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -1227,7 +1227,7 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -1237,7 +1237,7 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
@@ -1247,7 +1247,7 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -1257,7 +1257,7 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
@@ -1267,7 +1267,7 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -1277,7 +1277,7 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -1287,7 +1287,7 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -1297,7 +1297,7 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -1307,7 +1307,7 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -1317,7 +1317,7 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -1327,7 +1327,7 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
@@ -1337,7 +1337,7 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
@@ -1347,7 +1347,7 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
@@ -1357,7 +1357,7 @@ vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
@@ -1368,7 +1368,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
@@ -1378,7 +1378,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
@@ -1389,7 +1389,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
@@ -1399,7 +1399,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
@@ -1410,7 +1410,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
@@ -1420,7 +1420,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
@@ -1430,7 +1430,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
@@ -1440,7 +1440,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
@@ -1450,7 +1450,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
@@ -1460,7 +1460,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
@@ -1470,7 +1470,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
@@ -1480,7 +1480,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
@@ -1491,7 +1491,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
@@ -1502,7 +1502,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
@@ -1513,7 +1513,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
@@ -1523,7 +1523,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
@@ -1534,7 +1534,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
@@ -1544,7 +1544,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1555,7 +1555,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1565,7 +1565,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1576,7 +1576,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1586,7 +1586,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1597,7 +1597,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1607,7 +1607,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1618,7 +1618,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1628,7 +1628,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1639,7 +1639,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1649,7 +1649,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1660,7 +1660,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1670,7 +1670,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1681,7 +1681,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1691,7 +1691,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1702,7 +1702,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1712,7 +1712,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
@@ -1723,7 +1723,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
@@ -1733,7 +1733,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
@@ -1743,7 +1743,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
@@ -1753,7 +1753,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
@@ -1763,7 +1763,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1773,7 +1773,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
@@ -1783,7 +1783,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
@@ -1793,7 +1793,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1804,7 +1804,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1815,7 +1815,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -1826,7 +1826,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -1836,7 +1836,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -1847,7 +1847,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -1857,7 +1857,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
@@ -1868,7 +1868,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -1878,7 +1878,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
@@ -1889,7 +1889,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -1899,7 +1899,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -1910,7 +1910,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -1920,7 +1920,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -1931,7 +1931,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -1941,7 +1941,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -1952,7 +1952,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
@@ -1962,7 +1962,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
@@ -1973,7 +1973,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
@@ -1983,7 +1983,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1993,7 +1993,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -2003,7 +2003,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
@@ -2013,7 +2013,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
@@ -2023,7 +2023,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
@@ -2033,7 +2033,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
@@ -2043,7 +2043,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -2054,7 +2054,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -2064,7 +2064,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -2074,7 +2074,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
@@ -2084,7 +2084,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
@@ -2094,7 +2094,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -2104,7 +2104,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -2114,7 +2114,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -2124,7 +2124,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
index 5f01c59536145..ab4ef84ccc257 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmacc.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfmacc_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
@@ -485,7 +485,7 @@ vfloat16m8_t test_vfmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -494,7 +494,7 @@ vfloat16mf4_t test_vfmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -503,7 +503,7 @@ vfloat16mf4_t test_vfmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -512,7 +512,7 @@ vfloat16mf2_t test_vfmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -521,7 +521,7 @@ vfloat16mf2_t test_vfmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat16m1_t test_vfmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -539,7 +539,7 @@ vfloat16m1_t test_vfmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -548,7 +548,7 @@ vfloat16m2_t test_vfmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -557,7 +557,7 @@ vfloat16m2_t test_vfmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat16m4_t test_vfmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat16m4_t test_vfmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmacc_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -584,7 +584,7 @@ vfloat16m8_t test_vfmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
 
 // CHECK-RV64-LABEL: @test_vfmacc_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

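(For orientation, a minimal sketch of how one of the masked builtins exercised by the CHECK lines above is used from C. The wrapper name masked_fmacc and its variable names are illustrative only and not part of this patch; the builtin call follows the test_vfmacc_vv_f32m1_m pattern verified above, and it is this call that now lowers to the masked IR intrinsic carrying the extra trailing i64 operand.)

#include <riscv_vector.h>

// Illustrative use of the masked vfmacc C intrinsic. With the RVV intrinsics
// enabled, this lowers to @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32.i64 as in
// the CHECK lines above, now with a trailing i64 operand in the generated IR.
vfloat32m1_t masked_fmacc(vbool32_t mask, vfloat32m1_t acc,
                          vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
  return vfmacc_vv_f32m1_m(mask, acc, op1, op2, vl);
}
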
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
index 79e8d32e85d9f..29351df8302b9 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmadd.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfmadd_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
@@ -485,7 +485,7 @@ vfloat16m8_t test_vfmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -494,7 +494,7 @@ vfloat16mf4_t test_vfmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -503,7 +503,7 @@ vfloat16mf4_t test_vfmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -512,7 +512,7 @@ vfloat16mf2_t test_vfmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -521,7 +521,7 @@ vfloat16mf2_t test_vfmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat16m1_t test_vfmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -539,7 +539,7 @@ vfloat16m1_t test_vfmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -548,7 +548,7 @@ vfloat16m2_t test_vfmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -557,7 +557,7 @@ vfloat16m2_t test_vfmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat16m4_t test_vfmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat16m4_t test_vfmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmadd_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -584,7 +584,7 @@ vfloat16m8_t test_vfmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
 
 // CHECK-RV64-LABEL: @test_vfmadd_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
index d8e380ec8b20e..39458d19f069e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsac.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfmsac_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
@@ -485,7 +485,7 @@ vfloat16m8_t test_vfmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -494,7 +494,7 @@ vfloat16mf4_t test_vfmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -503,7 +503,7 @@ vfloat16mf4_t test_vfmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -512,7 +512,7 @@ vfloat16mf2_t test_vfmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -521,7 +521,7 @@ vfloat16mf2_t test_vfmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat16m1_t test_vfmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -539,7 +539,7 @@ vfloat16m1_t test_vfmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -548,7 +548,7 @@ vfloat16m2_t test_vfmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -557,7 +557,7 @@ vfloat16m2_t test_vfmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat16m4_t test_vfmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat16m4_t test_vfmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmsac_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -584,7 +584,7 @@ vfloat16m8_t test_vfmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
 
 // CHECK-RV64-LABEL: @test_vfmsac_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
index 529cb80d3b8f8..9bd7a68151460 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfmsub.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfmsub_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -209,7 +209,7 @@ vfloat32mf2_t test_vfmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -220,7 +220,7 @@ vfloat32m1_t test_vfmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
@@ -230,7 +230,7 @@ vfloat32m1_t test_vfmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -241,7 +241,7 @@ vfloat32m2_t test_vfmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
@@ -251,7 +251,7 @@ vfloat32m2_t test_vfmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -262,7 +262,7 @@ vfloat32m4_t test_vfmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -272,7 +272,7 @@ vfloat32m4_t test_vfmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -283,7 +283,7 @@ vfloat32m8_t test_vfmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -293,7 +293,7 @@ vfloat32m8_t test_vfmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -304,7 +304,7 @@ vfloat64m1_t test_vfmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -314,7 +314,7 @@ vfloat64m1_t test_vfmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -325,7 +325,7 @@ vfloat64m2_t test_vfmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -335,7 +335,7 @@ vfloat64m2_t test_vfmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -346,7 +346,7 @@ vfloat64m4_t test_vfmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -356,7 +356,7 @@ vfloat64m4_t test_vfmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -367,7 +367,7 @@ vfloat64m8_t test_vfmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, double op1,
@@ -485,7 +485,7 @@ vfloat16m8_t test_vfmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t v
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -494,7 +494,7 @@ vfloat16mf4_t test_vfmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -503,7 +503,7 @@ vfloat16mf4_t test_vfmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -512,7 +512,7 @@ vfloat16mf2_t test_vfmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat1
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -521,7 +521,7 @@ vfloat16mf2_t test_vfmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float1
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -530,7 +530,7 @@ vfloat16m1_t test_vfmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -539,7 +539,7 @@ vfloat16m1_t test_vfmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -548,7 +548,7 @@ vfloat16m2_t test_vfmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -557,7 +557,7 @@ vfloat16m2_t test_vfmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -566,7 +566,7 @@ vfloat16m4_t test_vfmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -575,7 +575,7 @@ vfloat16m4_t test_vfmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs
 
 // CHECK-RV64-LABEL: @test_vfmsub_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -584,7 +584,7 @@ vfloat16m8_t test_vfmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_
 
 // CHECK-RV64-LABEL: @test_vfmsub_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
index 192a5619332f2..703d7aa097f7c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmacc.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfnmacc_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfnmacc_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -210,7 +210,7 @@ vfloat32mf2_t test_vfnmacc_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -221,7 +221,7 @@ vfloat32m1_t test_vfnmacc_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -231,7 +231,7 @@ vfloat32m1_t test_vfnmacc_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -242,7 +242,7 @@ vfloat32m2_t test_vfnmacc_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -252,7 +252,7 @@ vfloat32m2_t test_vfnmacc_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -263,7 +263,7 @@ vfloat32m4_t test_vfnmacc_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -273,7 +273,7 @@ vfloat32m4_t test_vfnmacc_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -284,7 +284,7 @@ vfloat32m8_t test_vfnmacc_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmacc.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -294,7 +294,7 @@ vfloat32m8_t test_vfnmacc_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -305,7 +305,7 @@ vfloat64m1_t test_vfnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -315,7 +315,7 @@ vfloat64m1_t test_vfnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -326,7 +326,7 @@ vfloat64m2_t test_vfnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -336,7 +336,7 @@ vfloat64m2_t test_vfnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -347,7 +347,7 @@ vfloat64m4_t test_vfnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -357,7 +357,7 @@ vfloat64m4_t test_vfnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmacc.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -486,7 +486,7 @@ vfloat16m8_t test_vfnmacc_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -495,7 +495,7 @@ vfloat16mf4_t test_vfnmacc_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -504,7 +504,7 @@ vfloat16mf4_t test_vfnmacc_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@ vfloat16mf2_t test_vfnmacc_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -522,7 +522,7 @@ vfloat16mf2_t test_vfnmacc_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -531,7 +531,7 @@ vfloat16m1_t test_vfnmacc_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat16m1_t test_vfnmacc_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -549,7 +549,7 @@ vfloat16m2_t test_vfnmacc_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -558,7 +558,7 @@ vfloat16m2_t test_vfnmacc_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -567,7 +567,7 @@ vfloat16m4_t test_vfnmacc_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -576,7 +576,7 @@ vfloat16m4_t test_vfnmacc_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -585,7 +585,7 @@ vfloat16m8_t test_vfnmacc_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vfnmacc_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmacc.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmacc_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
index 0e0ae186b87dd..adefa17bb4f37 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmadd.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfnmadd_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfnmadd_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -210,7 +210,7 @@ vfloat32mf2_t test_vfnmadd_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -221,7 +221,7 @@ vfloat32m1_t test_vfnmadd_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -231,7 +231,7 @@ vfloat32m1_t test_vfnmadd_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -242,7 +242,7 @@ vfloat32m2_t test_vfnmadd_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -252,7 +252,7 @@ vfloat32m2_t test_vfnmadd_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -263,7 +263,7 @@ vfloat32m4_t test_vfnmadd_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -273,7 +273,7 @@ vfloat32m4_t test_vfnmadd_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -284,7 +284,7 @@ vfloat32m8_t test_vfnmadd_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmadd.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -294,7 +294,7 @@ vfloat32m8_t test_vfnmadd_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -305,7 +305,7 @@ vfloat64m1_t test_vfnmadd_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -315,7 +315,7 @@ vfloat64m1_t test_vfnmadd_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -326,7 +326,7 @@ vfloat64m2_t test_vfnmadd_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -336,7 +336,7 @@ vfloat64m2_t test_vfnmadd_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -347,7 +347,7 @@ vfloat64m4_t test_vfnmadd_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -357,7 +357,7 @@ vfloat64m4_t test_vfnmadd_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfnmadd_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmadd.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmadd_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -486,7 +486,7 @@ vfloat16m8_t test_vfnmadd_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -495,7 +495,7 @@ vfloat16mf4_t test_vfnmadd_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -504,7 +504,7 @@ vfloat16mf4_t test_vfnmadd_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@ vfloat16mf2_t test_vfnmadd_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -522,7 +522,7 @@ vfloat16mf2_t test_vfnmadd_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -531,7 +531,7 @@ vfloat16m1_t test_vfnmadd_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat16m1_t test_vfnmadd_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -549,7 +549,7 @@ vfloat16m2_t test_vfnmadd_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -558,7 +558,7 @@ vfloat16m2_t test_vfnmadd_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -567,7 +567,7 @@ vfloat16m4_t test_vfnmadd_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -576,7 +576,7 @@ vfloat16m4_t test_vfnmadd_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -585,7 +585,7 @@ vfloat16m8_t test_vfnmadd_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vfnmadd_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmadd.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmadd_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
index 11d3b9816df4d..c9f805328a107 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsac.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfnmsac_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfnmsac_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -210,7 +210,7 @@ vfloat32mf2_t test_vfnmsac_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -221,7 +221,7 @@ vfloat32m1_t test_vfnmsac_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -231,7 +231,7 @@ vfloat32m1_t test_vfnmsac_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -242,7 +242,7 @@ vfloat32m2_t test_vfnmsac_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -252,7 +252,7 @@ vfloat32m2_t test_vfnmsac_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -263,7 +263,7 @@ vfloat32m4_t test_vfnmsac_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -273,7 +273,7 @@ vfloat32m4_t test_vfnmsac_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -284,7 +284,7 @@ vfloat32m8_t test_vfnmsac_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsac.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -294,7 +294,7 @@ vfloat32m8_t test_vfnmsac_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -305,7 +305,7 @@ vfloat64m1_t test_vfnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -315,7 +315,7 @@ vfloat64m1_t test_vfnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -326,7 +326,7 @@ vfloat64m2_t test_vfnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -336,7 +336,7 @@ vfloat64m2_t test_vfnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -347,7 +347,7 @@ vfloat64m4_t test_vfnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -357,7 +357,7 @@ vfloat64m4_t test_vfnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsac.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -486,7 +486,7 @@ vfloat16m8_t test_vfnmsac_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -495,7 +495,7 @@ vfloat16mf4_t test_vfnmsac_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -504,7 +504,7 @@ vfloat16mf4_t test_vfnmsac_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -513,7 +513,7 @@ vfloat16mf2_t test_vfnmsac_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -522,7 +522,7 @@ vfloat16mf2_t test_vfnmsac_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -531,7 +531,7 @@ vfloat16m1_t test_vfnmsac_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -540,7 +540,7 @@ vfloat16m1_t test_vfnmsac_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -549,7 +549,7 @@ vfloat16m2_t test_vfnmsac_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -558,7 +558,7 @@ vfloat16m2_t test_vfnmsac_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -567,7 +567,7 @@ vfloat16m4_t test_vfnmsac_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -576,7 +576,7 @@ vfloat16m4_t test_vfnmsac_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -585,7 +585,7 @@ vfloat16m8_t test_vfnmsac_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vfnmsac_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsac.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmsac_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
index bdb90cb40e609..c2254419baa54 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfnmsub.c
@@ -188,7 +188,7 @@ vfloat64m8_t test_vfnmsub_vf_f64m8(vfloat64m8_t acc, double op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -199,7 +199,7 @@ vfloat32mf2_t test_vfnmsub_vv_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32.i64(<vscale x 1 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
@@ -210,7 +210,7 @@ vfloat32mf2_t test_vfnmsub_vf_f32mf2_m(vbool64_t mask, vfloat32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -221,7 +221,7 @@ vfloat32m1_t test_vfnmsub_vv_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32.i64(<vscale x 2 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
@@ -231,7 +231,7 @@ vfloat32m1_t test_vfnmsub_vf_f32m1_m(vbool32_t mask, vfloat32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -242,7 +242,7 @@ vfloat32m2_t test_vfnmsub_vv_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32.i64(<vscale x 4 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
@@ -252,7 +252,7 @@ vfloat32m2_t test_vfnmsub_vf_f32m2_m(vbool16_t mask, vfloat32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
@@ -263,7 +263,7 @@ vfloat32m4_t test_vfnmsub_vv_f32m4_m(vbool8_t mask, vfloat32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32.i64(<vscale x 8 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
@@ -273,7 +273,7 @@ vfloat32m4_t test_vfnmsub_vf_f32m4_m(vbool8_t mask, vfloat32m4_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[ACC:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
@@ -284,7 +284,7 @@ vfloat32m8_t test_vfnmsub_vv_f32m8_m(vbool4_t mask, vfloat32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfnmsub.mask.nxv16f32.f32.i64(<vscale x 16 x float> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
@@ -294,7 +294,7 @@ vfloat32m8_t test_vfnmsub_vf_f32m8_m(vbool4_t mask, vfloat32m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -305,7 +305,7 @@ vfloat64m1_t test_vfnmsub_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64.i64(<vscale x 1 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -315,7 +315,7 @@ vfloat64m1_t test_vfnmsub_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -326,7 +326,7 @@ vfloat64m2_t test_vfnmsub_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64.i64(<vscale x 2 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -336,7 +336,7 @@ vfloat64m2_t test_vfnmsub_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -347,7 +347,7 @@ vfloat64m4_t test_vfnmsub_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64.i64(<vscale x 4 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -357,7 +357,7 @@ vfloat64m4_t test_vfnmsub_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -368,7 +368,7 @@ vfloat64m8_t test_vfnmsub_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfnmsub.mask.nxv8f64.f64.i64(<vscale x 8 x double> [[ACC:%.*]], double [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfnmsub_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -492,7 +492,7 @@ vfloat16m8_t test_vfnmsub_vf_f16m8 (vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -501,7 +501,7 @@ vfloat16mf4_t test_vfnmsub_vv_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16.i64(<vscale x 1 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float16 rs1, vfloat16mf4_t vs2, size_t vl) {
@@ -510,7 +510,7 @@ vfloat16mf4_t test_vfnmsub_vf_f16mf4_m (vbool64_t mask, vfloat16mf4_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -519,7 +519,7 @@ vfloat16mf2_t test_vfnmsub_vv_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16.i64(<vscale x 2 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float16 rs1, vfloat16mf2_t vs2, size_t vl) {
@@ -528,7 +528,7 @@ vfloat16mf2_t test_vfnmsub_vf_f16mf2_m (vbool32_t mask, vfloat16mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -537,7 +537,7 @@ vfloat16m1_t test_vfnmsub_vv_f16m1_m (vbool16_t mask, vfloat16m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16.i64(<vscale x 4 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16 rs1, vfloat16m1_t vs2, size_t vl) {
@@ -546,7 +546,7 @@ vfloat16m1_t test_vfnmsub_vf_f16m1_m (vbool16_t mask, vfloat16m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -555,7 +555,7 @@ vfloat16m2_t test_vfnmsub_vv_f16m2_m (vbool8_t mask, vfloat16m2_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16.i64(<vscale x 8 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 rs1, vfloat16m2_t vs2, size_t vl) {
@@ -564,7 +564,7 @@ vfloat16m2_t test_vfnmsub_vf_f16m2_m (vbool8_t mask, vfloat16m2_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -573,7 +573,7 @@ vfloat16m4_t test_vfnmsub_vv_f16m4_m (vbool4_t mask, vfloat16m4_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16.i64(<vscale x 16 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 rs1, vfloat16m4_t vs2, size_t vl) {
@@ -582,7 +582,7 @@ vfloat16m4_t test_vfnmsub_vf_f16m4_m (vbool4_t mask, vfloat16m4_t vd, _Float16 r
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vv_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8_t vs1, vfloat16m8_t vs2, size_t vl) {
@@ -591,7 +591,7 @@ vfloat16m8_t test_vfnmsub_vv_f16m8_m (vbool2_t mask, vfloat16m8_t vd, vfloat16m8
 
 // CHECK-RV64-LABEL: @test_vfnmsub_vf_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfnmsub.mask.nxv32f16.f16.i64(<vscale x 32 x half> [[VD:%.*]], half [[RS1:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vfnmsub_vf_f16m8_m (vbool2_t mask, vfloat16m8_t vd, _Float16 rs1, vfloat16m8_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
index 51c3a6edaaa07..c43b9ad466f72 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmacc.c
@@ -88,7 +88,7 @@ vfloat64m8_t test_vfwmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -99,7 +99,7 @@ vfloat64m1_t test_vfwmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -109,7 +109,7 @@ vfloat64m1_t test_vfwmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -120,7 +120,7 @@ vfloat64m2_t test_vfwmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -141,7 +141,7 @@ vfloat64m4_t test_vfwmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfwmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -162,7 +162,7 @@ vfloat64m8_t test_vfwmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
@@ -262,7 +262,7 @@ vfloat32m8_t test_vfwmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -271,7 +271,7 @@ vfloat32mf2_t test_vfwmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat32mf2_t test_vfwmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -289,7 +289,7 @@ vfloat32m1_t test_vfwmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -298,7 +298,7 @@ vfloat32m1_t test_vfwmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -307,7 +307,7 @@ vfloat32m2_t test_vfwmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@@ -316,7 +316,7 @@ vfloat32m2_t test_vfwmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -325,7 +325,7 @@ vfloat32m4_t test_vfwmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@@ -334,7 +334,7 @@ vfloat32m4_t test_vfwmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 v
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -343,7 +343,7 @@ vfloat32m8_t test_vfwmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfwmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
index f247017eb3ebc..e6cb4861a4abc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwmsac.c
@@ -88,7 +88,7 @@ vfloat64m8_t test_vfwmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -99,7 +99,7 @@ vfloat64m1_t test_vfwmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -109,7 +109,7 @@ vfloat64m1_t test_vfwmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -120,7 +120,7 @@ vfloat64m2_t test_vfwmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -141,7 +141,7 @@ vfloat64m4_t test_vfwmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfwmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -162,7 +162,7 @@ vfloat64m8_t test_vfwmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc, float op1,
@@ -262,7 +262,7 @@ vfloat32m8_t test_vfwmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -271,7 +271,7 @@ vfloat32mf2_t test_vfwmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat32mf2_t test_vfwmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -289,7 +289,7 @@ vfloat32m1_t test_vfwmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -298,7 +298,7 @@ vfloat32m1_t test_vfwmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -307,7 +307,7 @@ vfloat32m2_t test_vfwmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@@ -316,7 +316,7 @@ vfloat32m2_t test_vfwmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -325,7 +325,7 @@ vfloat32m4_t test_vfwmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@@ -334,7 +334,7 @@ vfloat32m4_t test_vfwmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 v
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -343,7 +343,7 @@ vfloat32m8_t test_vfwmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4
 
 // CHECK-RV64-LABEL: @test_vfwmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
index f9d4cb6b60b84..cff04ff4bd30d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmacc.c
@@ -88,7 +88,7 @@ vfloat64m8_t test_vfwnmacc_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -99,7 +99,7 @@ vfloat64m1_t test_vfwnmacc_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -109,7 +109,7 @@ vfloat64m1_t test_vfwnmacc_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -120,7 +120,7 @@ vfloat64m2_t test_vfwnmacc_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmacc_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -141,7 +141,7 @@ vfloat64m4_t test_vfwnmacc_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfwnmacc_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -162,7 +162,7 @@ vfloat64m8_t test_vfwnmacc_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmacc_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -262,7 +262,7 @@ vfloat32m8_t test_vfwnmacc_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -271,7 +271,7 @@ vfloat32mf2_t test_vfwnmacc_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat32mf2_t test_vfwnmacc_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Floa
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -289,7 +289,7 @@ vfloat32m1_t test_vfwnmacc_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -298,7 +298,7 @@ vfloat32m1_t test_vfwnmacc_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -307,7 +307,7 @@ vfloat32m2_t test_vfwnmacc_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@@ -316,7 +316,7 @@ vfloat32m2_t test_vfwnmacc_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -325,7 +325,7 @@ vfloat32m4_t test_vfwnmacc_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@@ -334,7 +334,7 @@ vfloat32m4_t test_vfwnmacc_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -343,7 +343,7 @@ vfloat32m8_t test_vfwnmacc_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwnmacc_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwnmacc_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
index afe3e32691feb..d4a8239312bbd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vfwnmsac.c
@@ -88,7 +88,7 @@ vfloat64m8_t test_vfwnmsac_vf_f64m8(vfloat64m8_t acc, float op1,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -99,7 +99,7 @@ vfloat64m1_t test_vfwnmsac_vv_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32.nxv1f32.i64(<vscale x 1 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
@@ -109,7 +109,7 @@ vfloat64m1_t test_vfwnmsac_vf_f64m1_m(vbool64_t mask, vfloat64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -120,7 +120,7 @@ vfloat64m2_t test_vfwnmsac_vv_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32.nxv2f32.i64(<vscale x 2 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
@@ -130,7 +130,7 @@ vfloat64m2_t test_vfwnmsac_vf_f64m2_m(vbool32_t mask, vfloat64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -141,7 +141,7 @@ vfloat64m4_t test_vfwnmsac_vv_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32.nxv4f32.i64(<vscale x 4 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
@@ -151,7 +151,7 @@ vfloat64m4_t test_vfwnmsac_vf_f64m4_m(vbool16_t mask, vfloat64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -162,7 +162,7 @@ vfloat64m8_t test_vfwnmsac_vv_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32.nxv8f32.i64(<vscale x 8 x double> [[ACC:%.*]], float [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vfwnmsac_vf_f64m8_m(vbool8_t mask, vfloat64m8_t acc,
@@ -262,7 +262,7 @@ vfloat32m8_t test_vfwnmsac_vf_f32m8 (vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x half> [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloat16mf4_t vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -271,7 +271,7 @@ vfloat32mf2_t test_vfwnmsac_vv_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, vfloa
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16.nxv1f16.i64(<vscale x 1 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Float16 vs1, vfloat16mf4_t vs2, size_t vl) {
@@ -280,7 +280,7 @@ vfloat32mf2_t test_vfwnmsac_vf_f32mf2_m (vbool64_t mask, vfloat32mf2_t vd, _Floa
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x half> [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16mf2_t vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -289,7 +289,7 @@ vfloat32m1_t test_vfwnmsac_vv_f32m1_m (vbool32_t mask, vfloat32m1_t vd, vfloat16
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16.nxv2f16.i64(<vscale x 2 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16 vs1, vfloat16mf2_t vs2, size_t vl) {
@@ -298,7 +298,7 @@ vfloat32m1_t test_vfwnmsac_vf_f32m1_m (vbool32_t mask, vfloat32m1_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x half> [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16m1_t vs1, vfloat16m1_t vs2, size_t vl) {
@@ -307,7 +307,7 @@ vfloat32m2_t test_vfwnmsac_vv_f32m2_m (vbool16_t mask, vfloat32m2_t vd, vfloat16
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16.nxv4f16.i64(<vscale x 4 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16 vs1, vfloat16m1_t vs2, size_t vl) {
@@ -316,7 +316,7 @@ vfloat32m2_t test_vfwnmsac_vf_f32m2_m (vbool16_t mask, vfloat32m2_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x half> [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m2_t vs1, vfloat16m2_t vs2, size_t vl) {
@@ -325,7 +325,7 @@ vfloat32m4_t test_vfwnmsac_vv_f32m4_m (vbool8_t mask, vfloat32m4_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16.nxv8f16.i64(<vscale x 8 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16 vs1, vfloat16m2_t vs2, size_t vl) {
@@ -334,7 +334,7 @@ vfloat32m4_t test_vfwnmsac_vf_f32m4_m (vbool8_t mask, vfloat32m4_t vd, _Float16
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vv_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x half> [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m4_t vs1, vfloat16m4_t vs2, size_t vl) {
@@ -343,7 +343,7 @@ vfloat32m8_t test_vfwnmsac_vv_f32m8_m (vbool4_t mask, vfloat32m8_t vd, vfloat16m
 
 // CHECK-RV64-LABEL: @test_vfwnmsac_vf_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16.nxv16f16.i64(<vscale x 16 x float> [[VD:%.*]], half [[VS1:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vfwnmsac_vf_f32m8_m (vbool4_t mask, vfloat32m8_t vd, _Float16 vs1, vfloat16m4_t vs2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
index 478a67a16a5a0..243567cfab13f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmacc.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vmacc_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmacc_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmacc_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmacc_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmacc_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmacc_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmacc_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmacc_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmacc_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmacc_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmacc_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmacc_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmacc_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmacc_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmacc_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vmacc_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vmacc_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vmacc_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vmacc_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vmacc_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vmacc_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vmacc_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vmacc_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vmacc_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vmacc_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vmacc_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vmacc_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vmacc_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmacc.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vmacc_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vmacc_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vmacc_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vmacc_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vmacc_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vmacc_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vmacc_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vmacc_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vmacc_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vmacc_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vmacc_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vmacc_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmacc.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vmacc_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vmacc_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vmacc_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vmacc_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vmacc_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vmacc_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vmacc_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vmacc_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vmacc_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vmacc_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmacc.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vmacc_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vmacc_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vmacc_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vmacc_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vmacc_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vmacc_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vmacc_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmacc_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vmacc_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 
 // CHECK-RV64-LABEL: @test_vmacc_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmacc.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmacc_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

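For context, a minimal sketch (editor's illustration, not part of the patch) of how the masked vmacc intrinsic exercised by the tests above is called from C. It assumes the <riscv_vector.h> header and the pre-`__riscv_` intrinsic naming used by these tests, and a clang with RVV support (e.g. -target riscv64 -march=rv64gcv); the function name masked_multiply_add is hypothetical. As the updated CHECK lines show, the lowered IR call to @llvm.riscv.vmacc.mask.* now carries a trailing i64 policy operand (i64 0 here).

/* Illustrative sketch only; mirrors test_vmacc_vv_i32m1_m above. */
#include <stddef.h>
#include <riscv_vector.h>

vint32m1_t masked_multiply_add(vbool32_t mask, vint32m1_t acc,
                               vint32m1_t op1, vint32m1_t op2, size_t vl) {
  /* For elements selected by mask: acc[i] += op1[i] * op2[i].
     Expected to lower to
       @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32.i64(..., i64 vl, i64 0)
     matching the CHECK-RV64 lines in this diff. */
  return vmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
}
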
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
index de7241136561b..9835a68f21651 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vmadd.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vmadd_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vmadd_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vmadd_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vmadd_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vmadd_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vmadd_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vmadd_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vmadd_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vmadd_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vmadd_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vmadd_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vmadd_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vmadd_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vmadd_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vmadd_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vmadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vmadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vmadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vmadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vmadd_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vmadd_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vmadd_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vmadd_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vmadd_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vmadd_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vmadd_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vmadd_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vmadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vmadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vmadd_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vmadd_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vmadd_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vmadd_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vmadd_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vmadd_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vmadd_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vmadd_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vmadd_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vmadd_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vmadd_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vmadd_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vmadd_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vmadd_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vmadd_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vmadd_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vmadd_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vmadd_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vmadd_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vmadd_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vmadd_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vmadd_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vmadd_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vmadd_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vmadd_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vmadd_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vmadd_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vmadd_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vmadd_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, v
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmadd.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vmadd_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuin
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vmadd_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vmadd_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vmadd_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vmadd_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vmadd_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vmadd_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vmadd_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vmadd_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vmadd_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vmadd_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vmadd_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmadd.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vmadd_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vmadd_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vmadd_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vmadd_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vmadd_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vmadd_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vmadd_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vmadd_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vmadd_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vmadd_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmadd.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vmadd_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vmadd_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vmadd_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vmadd_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vmadd_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vmadd_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t o
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vmadd_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1,
 
 // CHECK-RV64-LABEL: @test_vmadd_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vmadd_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op
 
 // CHECK-RV64-LABEL: @test_vmadd_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmadd.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vmadd_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
index 65b97bdeccd40..e0aa06746cc55 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsac.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vnmsac_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vnmsac_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vnmsac_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vnmsac_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vnmsac_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vnmsac_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vnmsac_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vnmsac_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vnmsac_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vnmsac_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vnmsac_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vnmsac_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vnmsac_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vnmsac_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vnmsac_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vnmsac_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vnmsac_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vnmsac_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vnmsac_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vnmsac_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vnmsac_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vnmsac_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vnmsac_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vnmsac_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vnmsac_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vnmsac_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vnmsac_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vnmsac_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vnmsac_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vnmsac_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vnmsac_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vnmsac_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vnmsac_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vnmsac_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vnmsac_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vnmsac_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vnmsac_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vnmsac_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vnmsac_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vnmsac_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vnmsac_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vnmsac_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vnmsac_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vnmsac_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vnmsac_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vnmsac_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vnmsac_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vnmsac_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vnmsac_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vnmsac_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vnmsac_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vnmsac_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vnmsac_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vnmsac_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vnmsac_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vnmsac_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vnmsac_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vnmsac_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsac.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vnmsac_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vnmsac_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vnmsac_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vnmsac_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vnmsac_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vnmsac_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vnmsac_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vnmsac_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vnmsac_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vnmsac_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vnmsac_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vnmsac_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsac.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vnmsac_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vnmsac_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vnmsac_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vnmsac_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vnmsac_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vnmsac_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vnmsac_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vnmsac_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vnmsac_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vnmsac_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsac.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vnmsac_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vnmsac_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vnmsac_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vnmsac_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vnmsac_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vnmsac_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vnmsac_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsac_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vnmsac_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsac_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsac.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsac_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
index 6bc003f150cda..9a409c7abb662 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vnmsub.c
@@ -798,7 +798,7 @@ vuint64m8_t test_vnmsub_vx_u64m8(vuint64m8_t acc, uint64_t op1, vuint64m8_t op2,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
@@ -807,7 +807,7 @@ vint8mf8_t test_vnmsub_vv_i8mf8_m(vbool64_t mask, vint8mf8_t acc, vint8mf8_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vint8mf8_t op2, size_t vl) {
@@ -816,7 +816,7 @@ vint8mf8_t test_vnmsub_vx_i8mf8_m(vbool64_t mask, vint8mf8_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
@@ -825,7 +825,7 @@ vint8mf4_t test_vnmsub_vv_i8mf4_m(vbool32_t mask, vint8mf4_t acc, vint8mf4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vint8mf4_t op2, size_t vl) {
@@ -834,7 +834,7 @@ vint8mf4_t test_vnmsub_vx_i8mf4_m(vbool32_t mask, vint8mf4_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
@@ -843,7 +843,7 @@ vint8mf2_t test_vnmsub_vv_i8mf2_m(vbool16_t mask, vint8mf2_t acc, vint8mf2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
@@ -852,7 +852,7 @@ vint8mf2_t test_vnmsub_vx_i8mf2_m(vbool16_t mask, vint8mf2_t acc, int8_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
@@ -861,7 +861,7 @@ vint8m1_t test_vnmsub_vv_i8m1_m(vbool8_t mask, vint8m1_t acc, vint8m1_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
@@ -870,7 +870,7 @@ vint8m1_t test_vnmsub_vx_i8m1_m(vbool8_t mask, vint8m1_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
@@ -879,7 +879,7 @@ vint8m2_t test_vnmsub_vv_i8m2_m(vbool4_t mask, vint8m2_t acc, vint8m2_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
@@ -888,7 +888,7 @@ vint8m2_t test_vnmsub_vx_i8m2_m(vbool4_t mask, vint8m2_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
@@ -897,7 +897,7 @@ vint8m4_t test_vnmsub_vv_i8m4_m(vbool2_t mask, vint8m4_t acc, vint8m4_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
@@ -906,7 +906,7 @@ vint8m4_t test_vnmsub_vx_i8m4_m(vbool2_t mask, vint8m4_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vint8m8_t op2, size_t vl) {
@@ -915,7 +915,7 @@ vint8m8_t test_vnmsub_vv_i8m8_m(vbool1_t mask, vint8m8_t acc, vint8m8_t op1, vin
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m8_t op2, size_t vl) {
@@ -924,7 +924,7 @@ vint8m8_t test_vnmsub_vx_i8m8_m(vbool1_t mask, vint8m8_t acc, int8_t op1, vint8m
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
@@ -933,7 +933,7 @@ vint16mf4_t test_vnmsub_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc, vint16mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
@@ -942,7 +942,7 @@ vint16mf4_t test_vnmsub_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
@@ -951,7 +951,7 @@ vint16mf2_t test_vnmsub_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc, vint16mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
@@ -960,7 +960,7 @@ vint16mf2_t test_vnmsub_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
@@ -969,7 +969,7 @@ vint16m1_t test_vnmsub_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint16m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
@@ -978,7 +978,7 @@ vint16m1_t test_vnmsub_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int16_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
@@ -987,7 +987,7 @@ vint16m2_t test_vnmsub_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
@@ -996,7 +996,7 @@ vint16m2_t test_vnmsub_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
@@ -1005,7 +1005,7 @@ vint16m4_t test_vnmsub_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
@@ -1014,7 +1014,7 @@ vint16m4_t test_vnmsub_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1, vint16m8_t op2, size_t vl) {
@@ -1023,7 +1023,7 @@ vint16m8_t test_vnmsub_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint16m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vint16m8_t op2, size_t vl) {
@@ -1032,7 +1032,7 @@ vint16m8_t test_vnmsub_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int16_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
@@ -1041,7 +1041,7 @@ vint32mf2_t test_vnmsub_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint32mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
@@ -1050,7 +1050,7 @@ vint32mf2_t test_vnmsub_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
@@ -1059,7 +1059,7 @@ vint32m1_t test_vnmsub_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint32m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
@@ -1068,7 +1068,7 @@ vint32m1_t test_vnmsub_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
@@ -1077,7 +1077,7 @@ vint32m2_t test_vnmsub_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint32m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
@@ -1086,7 +1086,7 @@ vint32m2_t test_vnmsub_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int32_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
@@ -1095,7 +1095,7 @@ vint32m4_t test_vnmsub_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
@@ -1104,7 +1104,7 @@ vint32m4_t test_vnmsub_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1, vint32m8_t op2, size_t vl) {
@@ -1113,7 +1113,7 @@ vint32m8_t test_vnmsub_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint32m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vint32m8_t op2, size_t vl) {
@@ -1122,7 +1122,7 @@ vint32m8_t test_vnmsub_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int32_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1, vint64m1_t op2, size_t vl) {
@@ -1131,7 +1131,7 @@ vint64m1_t test_vnmsub_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint64m1_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, vint64m1_t op2, size_t vl) {
@@ -1140,7 +1140,7 @@ vint64m1_t test_vnmsub_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1, vint64m2_t op2, size_t vl) {
@@ -1149,7 +1149,7 @@ vint64m2_t test_vnmsub_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint64m2_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, vint64m2_t op2, size_t vl) {
@@ -1158,7 +1158,7 @@ vint64m2_t test_vnmsub_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1, vint64m4_t op2, size_t vl) {
@@ -1167,7 +1167,7 @@ vint64m4_t test_vnmsub_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint64m4_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, vint64m4_t op2, size_t vl) {
@@ -1176,7 +1176,7 @@ vint64m4_t test_vnmsub_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int64_t op1, v
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1, vint64m8_t op2, size_t vl) {
@@ -1185,7 +1185,7 @@ vint64m8_t test_vnmsub_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint64m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vint64m8_t op2, size_t vl) {
@@ -1194,7 +1194,7 @@ vint64m8_t test_vnmsub_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int64_t op1, vi
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1203,7 +1203,7 @@ vuint8mf8_t test_vnmsub_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, vuint8mf8_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
@@ -1212,7 +1212,7 @@ vuint8mf8_t test_vnmsub_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1221,7 +1221,7 @@ vuint8mf4_t test_vnmsub_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, vuint8mf4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
@@ -1230,7 +1230,7 @@ vuint8mf4_t test_vnmsub_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1239,7 +1239,7 @@ vuint8mf2_t test_vnmsub_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, vuint8mf2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
@@ -1248,7 +1248,7 @@ vuint8mf2_t test_vnmsub_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
@@ -1257,7 +1257,7 @@ vuint8m1_t test_vnmsub_vv_u8m1_m(vbool8_t mask, vuint8m1_t acc, vuint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
@@ -1266,7 +1266,7 @@ vuint8m1_t test_vnmsub_vx_u8m1_m(vbool8_t mask, vuint8m1_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
@@ -1275,7 +1275,7 @@ vuint8m2_t test_vnmsub_vv_u8m2_m(vbool4_t mask, vuint8m2_t acc, vuint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
@@ -1284,7 +1284,7 @@ vuint8m2_t test_vnmsub_vx_u8m2_m(vbool4_t mask, vuint8m2_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
@@ -1293,7 +1293,7 @@ vuint8m4_t test_vnmsub_vv_u8m4_m(vbool2_t mask, vuint8m4_t acc, vuint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
@@ -1302,7 +1302,7 @@ vuint8m4_t test_vnmsub_vx_u8m4_m(vbool2_t mask, vuint8m4_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[ACC:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1311,7 +1311,7 @@ vuint8m8_t test_vnmsub_vv_u8m8_m(vbool1_t mask, vuint8m8_t acc, vuint8m8_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vnmsub.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vuint8m8_t op2, size_t vl) {
@@ -1320,7 +1320,7 @@ vuint8m8_t test_vnmsub_vx_u8m8_m(vbool1_t mask, vuint8m8_t acc, uint8_t op1, vui
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1329,7 +1329,7 @@ vuint16mf4_t test_vnmsub_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
@@ -1338,7 +1338,7 @@ vuint16mf4_t test_vnmsub_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1347,7 +1347,7 @@ vuint16mf2_t test_vnmsub_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint16mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
@@ -1356,7 +1356,7 @@ vuint16mf2_t test_vnmsub_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint16_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
@@ -1365,7 +1365,7 @@ vuint16m1_t test_vnmsub_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint16m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
@@ -1374,7 +1374,7 @@ vuint16m1_t test_vnmsub_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint16_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
@@ -1383,7 +1383,7 @@ vuint16m2_t test_vnmsub_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint16m2_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
@@ -1392,7 +1392,7 @@ vuint16m2_t test_vnmsub_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
@@ -1401,7 +1401,7 @@ vuint16m4_t test_vnmsub_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint16m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1, vuint16m4_t op2, size_t vl) {
@@ -1410,7 +1410,7 @@ vuint16m4_t test_vnmsub_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
@@ -1419,7 +1419,7 @@ vuint16m8_t test_vnmsub_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint16m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vnmsub.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1, vuint16m8_t op2, size_t vl) {
@@ -1428,7 +1428,7 @@ vuint16m8_t test_vnmsub_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1437,7 +1437,7 @@ vuint32mf2_t test_vnmsub_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint32mf
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t op1, vuint32mf2_t op2, size_t vl) {
@@ -1446,7 +1446,7 @@ vuint32mf2_t test_vnmsub_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint32_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
@@ -1455,7 +1455,7 @@ vuint32m1_t test_vnmsub_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint32m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1, vuint32m1_t op2, size_t vl) {
@@ -1464,7 +1464,7 @@ vuint32m1_t test_vnmsub_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
@@ -1473,7 +1473,7 @@ vuint32m2_t test_vnmsub_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint32m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1, vuint32m2_t op2, size_t vl) {
@@ -1482,7 +1482,7 @@ vuint32m2_t test_vnmsub_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint32_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
@@ -1491,7 +1491,7 @@ vuint32m4_t test_vnmsub_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint32m4_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1, vuint32m4_t op2, size_t vl) {
@@ -1500,7 +1500,7 @@ vuint32m4_t test_vnmsub_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
@@ -1509,7 +1509,7 @@ vuint32m8_t test_vnmsub_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint32m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vnmsub.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1, vuint32m8_t op2, size_t vl) {
@@ -1518,7 +1518,7 @@ vuint32m8_t test_vnmsub_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc, uint32_t op1,
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
@@ -1527,7 +1527,7 @@ vuint64m1_t test_vnmsub_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc, vuint64m1_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1, vuint64m1_t op2, size_t vl) {
@@ -1536,7 +1536,7 @@ vuint64m1_t test_vnmsub_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
@@ -1545,7 +1545,7 @@ vuint64m2_t test_vnmsub_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc, vuint64m2_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1, vuint64m2_t op2, size_t vl) {
@@ -1554,7 +1554,7 @@ vuint64m2_t test_vnmsub_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
@@ -1563,7 +1563,7 @@ vuint64m4_t test_vnmsub_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc, vuint64m4_t
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1, vuint64m4_t op2, size_t vl) {
@@ -1572,7 +1572,7 @@ vuint64m4_t test_vnmsub_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc, uint64_t op1
 
 // CHECK-RV64-LABEL: @test_vnmsub_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
@@ -1581,7 +1581,7 @@ vuint64m8_t test_vnmsub_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc, vuint64m8_t o
 
 // CHECK-RV64-LABEL: @test_vnmsub_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vnmsub.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[ACC:%.*]], i64 [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vnmsub_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc, uint64_t op1, vuint64m8_t op2, size_t vl) {
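[Illustrative sketch only -- not part of the committed diff.] The CHECK lines above verify that the masked vnmsub intrinsics now carry a trailing policy operand, i64 0 (tail agnostic), after the vl operand. A minimal C-level example of the call pattern these tests exercise, assuming the pre-__riscv_-prefix intrinsic naming used in this test file and an rv64gcv target:

// Sketch only; the function name nmsub_masked is hypothetical.
#include <riscv_vector.h>

vint32m1_t nmsub_masked(vbool32_t mask, vint32m1_t acc,
                        vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // For active elements: acc[i] = -(acc[i] * op1[i]) + op2[i].
  // After this patch the lowered IR call ends in "..., i64 vl, i64 0",
  // where the final i64 0 is the tail-agnostic policy operand.
  return vnmsub_vv_i32m1_m(mask, acc, op1, op2, vl);
}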

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
index 86b7f9f0ff4a3..96b6233920f94 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslidedown.c
@@ -538,7 +538,7 @@ vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
@@ -549,7 +549,7 @@ vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
@@ -560,7 +560,7 @@ vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
@@ -571,7 +571,7 @@ vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
@@ -581,7 +581,7 @@ vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
@@ -591,7 +591,7 @@ vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
@@ -601,7 +601,7 @@ vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
@@ -611,7 +611,7 @@ vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
@@ -622,7 +622,7 @@ vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
@@ -633,7 +633,7 @@ vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -644,7 +644,7 @@ vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
@@ -655,7 +655,7 @@ vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
@@ -666,7 +666,7 @@ vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
@@ -677,7 +677,7 @@ vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
@@ -688,7 +688,7 @@ vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -699,7 +699,7 @@ vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
@@ -710,7 +710,7 @@ vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
@@ -721,7 +721,7 @@ vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
@@ -732,7 +732,7 @@ vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -743,7 +743,7 @@ vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
@@ -754,7 +754,7 @@ vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
@@ -765,7 +765,7 @@ vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
@@ -776,7 +776,7 @@ vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
@@ -787,7 +787,7 @@ vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
@@ -798,7 +798,7 @@ vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
@@ -809,7 +809,7 @@ vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -819,7 +819,7 @@ vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
@@ -829,7 +829,7 @@ vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
@@ -839,7 +839,7 @@ vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
@@ -849,7 +849,7 @@ vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
@@ -860,7 +860,7 @@ vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
@@ -871,7 +871,7 @@ vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -882,7 +882,7 @@ vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
@@ -893,7 +893,7 @@ vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
@@ -904,7 +904,7 @@ vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
@@ -915,7 +915,7 @@ vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
@@ -926,7 +926,7 @@ vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -937,7 +937,7 @@ vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
@@ -948,7 +948,7 @@ vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
@@ -959,7 +959,7 @@ vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
@@ -970,7 +970,7 @@ vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -981,7 +981,7 @@ vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
@@ -992,7 +992,7 @@ vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
@@ -1003,7 +1003,7 @@ vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
@@ -1014,7 +1014,7 @@ vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
@@ -1025,7 +1025,7 @@ vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -1036,7 +1036,7 @@ vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
@@ -1047,7 +1047,7 @@ vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
@@ -1058,7 +1058,7 @@ vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
@@ -1069,7 +1069,7 @@ vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -1080,7 +1080,7 @@ vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
@@ -1091,7 +1091,7 @@ vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1102,7 +1102,7 @@ vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
@@ -1167,7 +1167,7 @@ vfloat16m8_t test_vslidedown_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -1176,7 +1176,7 @@ vfloat16mf4_t test_vslidedown_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, v
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -1185,7 +1185,7 @@ vfloat16mf2_t test_vslidedown_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, v
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -1194,7 +1194,7 @@ vfloat16m1_t test_vslidedown_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vflo
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -1203,7 +1203,7 @@ vfloat16m2_t test_vslidedown_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloa
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -1212,7 +1212,7 @@ vfloat16m4_t test_vslidedown_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloa
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslidedown_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
index ca7cada4e51b6..54718f2c10959 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vslideup.c
@@ -538,7 +538,7 @@ vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
@@ -548,7 +548,7 @@ vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
@@ -558,7 +558,7 @@ vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
@@ -568,7 +568,7 @@ vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
@@ -578,7 +578,7 @@ vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
@@ -588,7 +588,7 @@ vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
@@ -598,7 +598,7 @@ vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
@@ -608,7 +608,7 @@ vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
@@ -619,7 +619,7 @@ vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
@@ -630,7 +630,7 @@ vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
@@ -640,7 +640,7 @@ vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
@@ -650,7 +650,7 @@ vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
@@ -660,7 +660,7 @@ vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
@@ -670,7 +670,7 @@ vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
@@ -681,7 +681,7 @@ vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
@@ -691,7 +691,7 @@ vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
@@ -701,7 +701,7 @@ vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
@@ -711,7 +711,7 @@ vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
@@ -721,7 +721,7 @@ vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
@@ -731,7 +731,7 @@ vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
@@ -741,7 +741,7 @@ vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
@@ -751,7 +751,7 @@ vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
@@ -761,7 +761,7 @@ vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
 vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
@@ -772,7 +772,7 @@ vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
 vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
@@ -783,7 +783,7 @@ vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
 vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
@@ -794,7 +794,7 @@ vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
 vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
@@ -804,7 +804,7 @@ vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
 vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
@@ -814,7 +814,7 @@ vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
 vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
@@ -824,7 +824,7 @@ vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
 vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
@@ -834,7 +834,7 @@ vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
@@ -845,7 +845,7 @@ vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
@@ -856,7 +856,7 @@ vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
@@ -867,7 +867,7 @@ vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
@@ -878,7 +878,7 @@ vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
@@ -889,7 +889,7 @@ vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
@@ -900,7 +900,7 @@ vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
@@ -911,7 +911,7 @@ vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
@@ -922,7 +922,7 @@ vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
@@ -933,7 +933,7 @@ vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
@@ -944,7 +944,7 @@ vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
@@ -955,7 +955,7 @@ vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
@@ -966,7 +966,7 @@ vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
@@ -977,7 +977,7 @@ vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
@@ -988,7 +988,7 @@ vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
@@ -999,7 +999,7 @@ vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
 vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
@@ -1010,7 +1010,7 @@ vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
 vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
@@ -1021,7 +1021,7 @@ vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
 vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
@@ -1032,7 +1032,7 @@ vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
 vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
@@ -1043,7 +1043,7 @@ vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
 vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
@@ -1054,7 +1054,7 @@ vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
 vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
@@ -1065,7 +1065,7 @@ vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
 vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
@@ -1076,7 +1076,7 @@ vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
 vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
@@ -1087,7 +1087,7 @@ vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
 vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
@@ -1152,7 +1152,7 @@ vfloat16m8_t test_vslideup_vx_f16m8 (vfloat16m8_t dest, vfloat16m8_t src, size_t
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
 vfloat16mf4_t test_vslideup_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
@@ -1161,7 +1161,7 @@ vfloat16mf4_t test_vslideup_vx_f16mf4_m (vbool64_t mask, vfloat16mf4_t dest, vfl
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
 vfloat16mf2_t test_vslideup_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
@@ -1170,7 +1170,7 @@ vfloat16mf2_t test_vslideup_vx_f16mf2_m (vbool32_t mask, vfloat16mf2_t dest, vfl
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
 vfloat16m1_t test_vslideup_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
@@ -1179,7 +1179,7 @@ vfloat16m1_t test_vslideup_vx_f16m1_m (vbool16_t mask, vfloat16m1_t dest, vfloat
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
 vfloat16m2_t test_vslideup_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
@@ -1188,7 +1188,7 @@ vfloat16m2_t test_vslideup_vx_f16m2_m (vbool8_t mask, vfloat16m2_t dest, vfloat1
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
 vfloat16m4_t test_vslideup_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
@@ -1197,7 +1197,7 @@ vfloat16m4_t test_vslideup_vx_f16m4_m (vbool4_t mask, vfloat16m4_t dest, vfloat1
 
 // CHECK-RV64-LABEL: @test_vslideup_vx_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslideup.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
 vfloat16m8_t test_vslideup_vx_f16m8_m (vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
index a0f993feb7c4d..f8eb6e6d111a1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vwmacc.c
@@ -1056,7 +1056,7 @@ vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1066,7 +1066,7 @@ vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
@@ -1076,7 +1076,7 @@ vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1086,7 +1086,7 @@ vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
@@ -1096,7 +1096,7 @@ vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
@@ -1106,7 +1106,7 @@ vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
@@ -1116,7 +1116,7 @@ vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
@@ -1126,7 +1126,7 @@ vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
@@ -1136,7 +1136,7 @@ vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
@@ -1146,7 +1146,7 @@ vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1156,7 +1156,7 @@ vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
@@ -1166,7 +1166,7 @@ vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
@@ -1176,7 +1176,7 @@ vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1187,7 +1187,7 @@ vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1197,7 +1197,7 @@ vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -1207,7 +1207,7 @@ vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -1217,7 +1217,7 @@ vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -1227,7 +1227,7 @@ vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -1237,7 +1237,7 @@ vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
@@ -1247,7 +1247,7 @@ vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -1257,7 +1257,7 @@ vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
@@ -1267,7 +1267,7 @@ vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -1277,7 +1277,7 @@ vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -1287,7 +1287,7 @@ vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -1297,7 +1297,7 @@ vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -1307,7 +1307,7 @@ vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -1317,7 +1317,7 @@ vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -1327,7 +1327,7 @@ vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
@@ -1337,7 +1337,7 @@ vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
@@ -1347,7 +1347,7 @@ vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
@@ -1357,7 +1357,7 @@ vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
@@ -1368,7 +1368,7 @@ vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
@@ -1378,7 +1378,7 @@ vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
@@ -1389,7 +1389,7 @@ vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
@@ -1399,7 +1399,7 @@ vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
@@ -1410,7 +1410,7 @@ vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
@@ -1420,7 +1420,7 @@ vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
@@ -1430,7 +1430,7 @@ vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
@@ -1440,7 +1440,7 @@ vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
@@ -1450,7 +1450,7 @@ vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
@@ -1460,7 +1460,7 @@ vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
@@ -1470,7 +1470,7 @@ vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
@@ -1480,7 +1480,7 @@ vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
@@ -1491,7 +1491,7 @@ vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
@@ -1502,7 +1502,7 @@ vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
@@ -1513,7 +1513,7 @@ vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
@@ -1523,7 +1523,7 @@ vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
@@ -1534,7 +1534,7 @@ vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
@@ -1544,7 +1544,7 @@ vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1555,7 +1555,7 @@ vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
@@ -1565,7 +1565,7 @@ vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1576,7 +1576,7 @@ vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
@@ -1586,7 +1586,7 @@ vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1597,7 +1597,7 @@ vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
@@ -1607,7 +1607,7 @@ vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1618,7 +1618,7 @@ vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
@@ -1628,7 +1628,7 @@ vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1639,7 +1639,7 @@ vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
@@ -1649,7 +1649,7 @@ vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1660,7 +1660,7 @@ vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
@@ -1670,7 +1670,7 @@ vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1681,7 +1681,7 @@ vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1691,7 +1691,7 @@ vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1702,7 +1702,7 @@ vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -1712,7 +1712,7 @@ vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
@@ -1723,7 +1723,7 @@ vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
@@ -1733,7 +1733,7 @@ vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
@@ -1743,7 +1743,7 @@ vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
@@ -1753,7 +1753,7 @@ vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
@@ -1763,7 +1763,7 @@ vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
@@ -1773,7 +1773,7 @@ vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
@@ -1783,7 +1783,7 @@ vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
@@ -1793,7 +1793,7 @@ vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1804,7 +1804,7 @@ vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -1815,7 +1815,7 @@ vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -1826,7 +1826,7 @@ vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
@@ -1836,7 +1836,7 @@ vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -1847,7 +1847,7 @@ vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
@@ -1857,7 +1857,7 @@ vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
@@ -1868,7 +1868,7 @@ vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
@@ -1878,7 +1878,7 @@ vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
@@ -1889,7 +1889,7 @@ vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
@@ -1899,7 +1899,7 @@ vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -1910,7 +1910,7 @@ vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
@@ -1920,7 +1920,7 @@ vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -1931,7 +1931,7 @@ vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
@@ -1941,7 +1941,7 @@ vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -1952,7 +1952,7 @@ vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
@@ -1962,7 +1962,7 @@ vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
@@ -1973,7 +1973,7 @@ vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
@@ -1983,7 +1983,7 @@ vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
 vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
@@ -1993,7 +1993,7 @@ vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
 vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
@@ -2003,7 +2003,7 @@ vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
 vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
@@ -2013,7 +2013,7 @@ vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
 vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
@@ -2023,7 +2023,7 @@ vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
 vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
@@ -2033,7 +2033,7 @@ vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
 vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
@@ -2043,7 +2043,7 @@ vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
 vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
@@ -2054,7 +2054,7 @@ vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
 vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
@@ -2064,7 +2064,7 @@ vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
 vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
@@ -2074,7 +2074,7 @@ vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
 vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
@@ -2084,7 +2084,7 @@ vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
 vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
@@ -2094,7 +2094,7 @@ vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
 vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
@@ -2104,7 +2104,7 @@ vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
 vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
@@ -2114,7 +2114,7 @@ vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
 vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
@@ -2124,7 +2124,7 @@ vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
 
 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
 vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,

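All of the regenerated checks above change in the same way: the masked
vwmaccu/vwmaccsu/vwmaccus calls now carry a trailing i64 policy operand after
vl, and the _m builtins shown here pass 0 for it. For reference, a rough
sketch of the updated masked intrinsic declaration for the vx u16m1 form from
the first test above (assuming RV64, so XLEN mangles to i64; parameter
attributes other than immarg are omitted):

  ; Sketch only: operand order is acc, op1, op2, mask, vl, policy.
  declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(
      <vscale x 4 x i16>,   ; acc (merge/destination register)
      i8,                   ; op1 (scalar operand of the .vx form)
      <vscale x 4 x i8>,    ; op2
      <vscale x 4 x i1>,    ; mask
      i64,                  ; vl
      i64 immarg)           ; policy, must be a compile-time constant
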
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 6780436bd701a..5e44fb900c0ab 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -669,8 +669,9 @@ let TargetPrefix = "riscv" in {
   class RISCVTernaryAAAXMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyint_ty,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
-                    [IntrNoMem]>, RISCVVIntrinsic {
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                     LLVMMatchType<1>, LLVMMatchType<1>],
+                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
     let VLOperand = 4;
   }
   class RISCVTernaryAAXANoMask
@@ -681,11 +682,13 @@ let TargetPrefix = "riscv" in {
     let SplatOperand = 1;
     let VLOperand = 3;
   }
+  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
   class RISCVTernaryAAXAMask
         : Intrinsic<[llvm_anyvector_ty],
                     [LLVMMatchType<0>, llvm_any_ty, LLVMMatchType<0>,
-                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
-                    [IntrNoMem]>, RISCVVIntrinsic {
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                     llvm_anyint_ty, LLVMMatchType<2>],
+                    [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 1;
     let VLOperand = 4;
   }
@@ -697,11 +700,13 @@ let TargetPrefix = "riscv" in {
     let SplatOperand = 1;
     let VLOperand = 3;
   }
+  // Input: (vector_in, vector_in/scalar, vector_in, mask, vl, policy)
   class RISCVTernaryWideMask
         : Intrinsic< [llvm_anyvector_ty],
                      [LLVMMatchType<0>, llvm_any_ty, llvm_anyvector_ty,
-                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
-                     [IntrNoMem]>, RISCVVIntrinsic {
+                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                      llvm_anyint_ty, LLVMMatchType<3>],
+                     [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
     let SplatOperand = 1;
     let VLOperand = 4;
   }

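The ImmArg<ArgIndex<5>> added to each of these masked ternary classes makes
the new policy operand an immediate at the IR level, which is what allows the
patterns later in this patch to match it as a target immediate (timm). A
minimal call-site sketch, reusing the nxv2i32 widening form from the tests
above (the SSA value names are placeholders):

  ; The trailing policy operand must be a constant (here 0); a non-constant
  ; value in that slot is rejected by the IR verifier.
  %r = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(
           <vscale x 2 x i32> %acc, <vscale x 2 x i16> %op1, <vscale x 2 x i16> %op2,
           <vscale x 2 x i1> %mask, i64 %vl, i64 0)
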
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
index 41fa5087e9303..25b02824c1450 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -1065,10 +1065,10 @@ class VPseudoBinaryMask<VReg RetClass,
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }
 
-class VPseudoBinaryMaskTA<VReg RetClass,
-                          RegisterClass Op1Class,
-                          DAGOperand Op2Class,
-                          string Constraint> :
+class VPseudoBinaryMaskPolicy<VReg RetClass,
+                              RegisterClass Op1Class,
+                              DAGOperand Op2Class,
+                              string Constraint> :
         Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
                 (ins GetVRegNoV0<RetClass>.R:$merge,
                      Op1Class:$rs2, Op2Class:$rs1,
@@ -1652,8 +1652,8 @@ multiclass VPseudoBinary<VReg RetClass,
   let VLMul = MInfo.value in {
     def "_" # MInfo.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                              Constraint>;
-    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
-                                                       Constraint>,
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
+                                                           Constraint>,
                                    RISCVMaskedPseudo</*MaskOpIdx*/ 3>;
   }
 }
@@ -1681,8 +1681,8 @@ multiclass VPseudoBinaryEmul<VReg RetClass,
   let VLMul = lmul.value in {
     def "_" # lmul.MX # "_" # emul.MX : VPseudoBinaryNoMask<RetClass, Op1Class, Op2Class,
                                                             Constraint>;
-    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskTA<RetClass, Op1Class, Op2Class,
-                                                                      Constraint>;
+    def "_" # lmul.MX # "_" # emul.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
+                                                                          Constraint>;
   }
 }
 
@@ -2333,6 +2333,19 @@ multiclass VPseudoTernary<VReg RetClass,
   }
 }
 
+multiclass VPseudoTernaryNoMaskNoPolicy<VReg RetClass,
+                                        RegisterClass Op1Class,
+                                        DAGOperand Op2Class,
+                                        LMULInfo MInfo,
+                                        string Constraint = ""> {
+  let VLMul = MInfo.value in {
+    def "_" # MInfo.MX : VPseudoTernaryNoMask<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class,
+                                                           Constraint>;
+
+  }
+}
+
 multiclass VPseudoTernaryWithPolicy<VReg RetClass,
                                     RegisterClass Op1Class,
                                     DAGOperand Op2Class,
@@ -2342,7 +2355,7 @@ multiclass VPseudoTernaryWithPolicy<VReg RetClass,
   let VLMul = MInfo.value in {
     let isCommutable = Commutable in
     def "_" # MInfo.MX : VPseudoTernaryNoMaskWithPolicy<RetClass, Op1Class, Op2Class, Constraint>;
-    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMask<RetClass, Op1Class, Op2Class, Constraint>;
+    def "_" # MInfo.MX # "_MASK" : VPseudoBinaryMaskPolicy<RetClass, Op1Class, Op2Class, Constraint>;
   }
 }
 
@@ -2356,7 +2369,7 @@ multiclass VPseudoTernaryV_VV_AAXA<string Constraint = "",
 
 multiclass VPseudoTernaryV_VX<string Constraint = ""> {
   foreach m = MxList in
-    defm _VX : VPseudoTernary<m.vrclass, m.vrclass, GPR, m, Constraint>;
+    defm _VX : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, GPR, m, Constraint>;
 }
 
 multiclass VPseudoTernaryV_VX_AAXA<string Constraint = ""> {
@@ -2397,7 +2410,7 @@ multiclass VPseudoTernaryW_VF {
 
 multiclass VPseudoTernaryV_VI<Operand ImmType = simm5, string Constraint = ""> {
   foreach m = MxList in
-    defm _VI : VPseudoTernary<m.vrclass, m.vrclass, ImmType, m, Constraint>;
+    defm _VI : VPseudoTernaryNoMaskNoPolicy<m.vrclass, m.vrclass, ImmType, m, Constraint>;
 }
 
 multiclass VPseudoVMAC_VV_VX_AAXA<string Constraint = ""> {
@@ -3013,6 +3026,31 @@ class VPatTernaryMask<string intrinsic,
                     (mask_type V0),
                     GPR:$vl, sew)>;
 
+class VPatTernaryMaskPolicy<string intrinsic,
+                            string inst,
+                            string kind,
+                            ValueType result_type,
+                            ValueType op1_type,
+                            ValueType op2_type,
+                            ValueType mask_type,
+                            int sew,
+                            LMULInfo vlmul,
+                            VReg result_reg_class,
+                            RegisterClass op1_reg_class,
+                            DAGOperand op2_kind> :
+  Pat<(result_type (!cast<Intrinsic>(intrinsic#"_mask")
+                    (result_type result_reg_class:$rs3),
+                    (op1_type op1_reg_class:$rs1),
+                    (op2_type op2_kind:$rs2),
+                    (mask_type V0),
+                    VLOpFrag, (XLenVT timm:$policy))),
+                   (!cast<Instruction>(inst#"_"#kind#"_"#vlmul.MX # "_MASK")
+                    result_reg_class:$rs3,
+                    (op1_type op1_reg_class:$rs1),
+                    op2_kind:$rs2,
+                    (mask_type V0),
+                    GPR:$vl, sew, (XLenVT timm:$policy))>;
+
 multiclass VPatUnaryS_M<string intrinsic_name,
                              string inst>
 {
@@ -3584,6 +3622,26 @@ multiclass VPatTernary<string intrinsic,
                         op2_kind>;
 }
 
+multiclass VPatTernaryNoMaskNoPolicy<string intrinsic,
+                                     string inst,
+                                     string kind,
+                                     ValueType result_type,
+                                     ValueType op1_type,
+                                     ValueType op2_type,
+                                     ValueType mask_type,
+                                     int sew,
+                                     LMULInfo vlmul,
+                                     VReg result_reg_class,
+                                     RegisterClass op1_reg_class,
+                                     DAGOperand op2_kind> {
+  def : VPatTernaryNoMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                          sew, vlmul, result_reg_class, op1_reg_class,
+                          op2_kind>;
+  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+                              op2_kind>;
+}
+
 multiclass VPatTernaryWithPolicy<string intrinsic,
                                  string inst,
                                  string kind,
@@ -3599,9 +3657,9 @@ multiclass VPatTernaryWithPolicy<string intrinsic,
   def : VPatTernaryNoMaskWithPolicy<intrinsic, inst, kind, result_type, op1_type,
                                     op2_type, sew, vlmul, result_reg_class,
                                     op1_reg_class, op2_kind>;
-  def : VPatTernaryMask<intrinsic, inst, kind, result_type, op1_type, op2_type,
-                        mask_type, sew, vlmul, result_reg_class, op1_reg_class,
-                        op2_kind>;
+  def : VPatTernaryMaskPolicy<intrinsic, inst, kind, result_type, op1_type, op2_type,
+                              mask_type, sew, vlmul, result_reg_class, op1_reg_class,
+                              op2_kind>;
 }
 
 multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
@@ -3616,10 +3674,10 @@ multiclass VPatTernaryV_VV_AAXA<string intrinsic, string instruction,
 multiclass VPatTernaryV_VX<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist> {
   foreach vti = vtilist in
-    defm : VPatTernary<intrinsic, instruction, "VX",
-                       vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                       vti.Log2SEW, vti.LMul, vti.RegClass,
-                       vti.RegClass, GPR>;
+    defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VX",
+                                     vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                     vti.Log2SEW, vti.LMul, vti.RegClass,
+                                     vti.RegClass, GPR>;
 }
 
 multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
@@ -3635,10 +3693,10 @@ multiclass VPatTernaryV_VX_AAXA<string intrinsic, string instruction,
 multiclass VPatTernaryV_VI<string intrinsic, string instruction,
                            list<VTypeInfo> vtilist, Operand Imm_type> {
   foreach vti = vtilist in
-    defm : VPatTernary<intrinsic, instruction, "VI",
-                      vti.Vector, vti.Vector, XLenVT, vti.Mask,
-                      vti.Log2SEW, vti.LMul, vti.RegClass,
-                      vti.RegClass, Imm_type>;
+    defm : VPatTernaryNoMaskNoPolicy<intrinsic, instruction, "VI",
+                                     vti.Vector, vti.Vector, XLenVT, vti.Mask,
+                                     vti.Log2SEW, vti.LMul, vti.RegClass,
+                                     vti.RegClass, Imm_type>;
 }
 
 multiclass VPatTernaryW_VV<string intrinsic, string instruction,

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
index 41bda9e241232..7f5630456afec 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -1111,7 +1111,7 @@ foreach vti = AllFloatVectors in {
                                       VLOpFrag)),
             (!cast<Instruction>("PseudoVFMADD_VV_"# suffix #"_MASK")
                  vti.RegClass:$rd, vti.RegClass:$rs1, vti.RegClass:$rs2,
-                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED)>;
 
   def : Pat<(vti.Vector (riscv_fma_vl vti.RegClass:$rs1, vti.RegClass:$rd,
                                       (riscv_fneg_vl vti.RegClass:$rs2,
@@ -1161,7 +1161,7 @@ foreach vti = AllFloatVectors in {
                                        VLOpFrag)),
             (!cast<Instruction>("PseudoVFMADD_V" # vti.ScalarSuffix # "_" # suffix # "_MASK")
                  vti.RegClass:$rd, vti.ScalarRegClass:$rs1, vti.RegClass:$rs2,
-                 (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
+                 (vti.Mask V0), GPR:$vl, vti.Log2SEW, TAIL_UNDISTURBED)>;
 
   def : Pat<(vti.Vector (riscv_fma_vl (SplatFPOp vti.ScalarRegClass:$rs1),
                                        vti.RegClass:$rd,

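At the call site the new trailing immediate selects the tail policy. The
updated tests below pass 0 (tail undisturbed), matching the TAIL_UNDISTURBED
constant the masked VL patterns above now hard-code, and the accumulator
operand still serves as the tied merge value. An illustrative call (the SSA
value names here are placeholders, not taken from the tests):

  %a = call <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
    <vscale x 1 x half> %acc, <vscale x 1 x half> %op1, <vscale x 1 x half> %op2,
    <vscale x 1 x i1> %mask, iXLen %vl, iXLen 0)
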
diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
index 5115a7548e2ce..02fa45976203b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmacc.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmacc.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmacc.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmacc.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmacc.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmacc.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmacc.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmacc.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmacc.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmacc.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmacc.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmacc.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmacc.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmacc_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
index 9313e440e500f..4b2ffbdc863e0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmadd.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmadd.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmadd.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmadd.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmadd.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmadd.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmadd.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmadd.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmadd.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmadd.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmadd.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmadd.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmadd.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmadd_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
index cf9df7550fcc7..6e7e3acadea40 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsac.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsac.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsac.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsac.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsac.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsac.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsac.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsac.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsac.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsac.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsac.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsac.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsac.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsac_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
index d071893ceb085..0f7a58a9480ac 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfmsub.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfmsub.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfmsub.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfmsub.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfmsub.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfmsub.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfmsub.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfmsub.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfmsub.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfmsub.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfmsub.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfmsub.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfmsub.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfmsub_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
index d46c29f3be78b..2d22dc269643c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmacc.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmacc.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmacc.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmacc.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmacc.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmacc.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmacc.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmacc.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmacc.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmacc.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmacc.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmacc.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmacc.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmacc_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
index 44810af5ab31b..4a7cf4d010256 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmadd.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmadd.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmadd.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmadd.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmadd.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmadd.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmadd.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmadd.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmadd.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmadd.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmadd.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmadd.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmadd.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmadd_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
index ff1bcfa86d3a8..dc77c03041dbd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsac.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsac.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsac.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsac.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsac.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsac.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsac.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsac.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsac.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsac.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsac.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsac.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsac.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsac_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
index e6ca32f34752c..6a3a7bbf37607 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfnmsub.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half>  @intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f16_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half>  @intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f16_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half>  @intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f16_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half>  @intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f16_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half>  @intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv16f16_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f32_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f32_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f32_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv8f32_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.nxv1f64(
   <vscale x 1 x double>,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv1f64_nxv1f64_nxv1f64:
@@ -458,7 +458,7 @@ entry:
     <vscale x 1 x double> %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.nxv2f64(
   <vscale x 2 x double>,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv2f64_nxv2f64_nxv2f64:
@@ -504,7 +504,7 @@ entry:
     <vscale x 2 x double> %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.nxv4f64(
   <vscale x 4 x double>,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vv_nxv4f64_nxv4f64_nxv4f64:
@@ -550,7 +550,7 @@ entry:
     <vscale x 4 x double> %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 1 x half> @llvm.riscv.vfnmsub.mask.nxv1f16.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x half> @intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16(<vscale x 1 x half> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f16_f16_nxv1f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x half> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 2 x half> @llvm.riscv.vfnmsub.mask.nxv2f16.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x half> @intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16(<vscale x 2 x half> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f16_f16_nxv2f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x half> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 4 x half> @llvm.riscv.vfnmsub.mask.nxv4f16.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x half> @intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16(<vscale x 4 x half> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f16_f16_nxv4f16:
@@ -688,7 +688,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x half> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 8 x half> @llvm.riscv.vfnmsub.mask.nxv8f16.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x half> @intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16(<vscale x 8 x half> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f16_f16_nxv8f16:
@@ -734,7 +734,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x half> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vfnmsub.mask.nxv16f16.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x half> @intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16(<vscale x 16 x half> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv16f16_f16_nxv16f16:
@@ -780,7 +780,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x half> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfnmsub.mask.nxv1f32.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32(<vscale x 1 x float> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f32_f32_nxv1f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -858,7 +858,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfnmsub.mask.nxv2f32.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32(<vscale x 2 x float> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f32_f32_nxv2f32:
@@ -872,7 +872,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -904,7 +904,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfnmsub.mask.nxv4f32.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32(<vscale x 4 x float> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f32_f32_nxv4f32:
@@ -918,7 +918,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -950,7 +950,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfnmsub.mask.nxv8f32.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32(<vscale x 8 x float> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv8f32_f32_nxv8f32:
@@ -964,7 +964,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -996,7 +996,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfnmsub.mask.nxv1f64.f64(
   double,
   <vscale x 1 x double>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64(<vscale x 1 x double> %0, double %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv1f64_f64_nxv1f64:
@@ -1010,7 +1010,7 @@ entry:
     double %1,
     <vscale x 1 x double> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -1042,7 +1042,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfnmsub.mask.nxv2f64.f64(
   double,
   <vscale x 2 x double>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64(<vscale x 2 x double> %0, double %1, <vscale x 2 x double> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv2f64_f64_nxv2f64:
@@ -1056,7 +1056,7 @@ entry:
     double %1,
     <vscale x 2 x double> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -1088,7 +1088,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfnmsub.mask.nxv4f64.f64(
   double,
   <vscale x 4 x double>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64(<vscale x 4 x double> %0, double %1, <vscale x 4 x double> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfnmsub_mask_vf_nxv4f64_f64_nxv4f64:
@@ -1102,7 +1102,7 @@ entry:
     double %1,
     <vscale x 4 x double> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
index f5db61b5e8c7c..6e9eec795aa35 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmacc.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float>  @intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double>  @intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmacc.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f32_f16_nxv1f16:
@@ -458,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmacc.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f32_f16_nxv2f16:
@@ -504,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmacc.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f32_f16_nxv4f16:
@@ -550,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmacc.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f32_f16_nxv8f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmacc.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv16f32_f16_nxv16f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmacc.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv1f64_f32_nxv1f32:
@@ -688,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmacc.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv2f64_f32_nxv2f32:
@@ -734,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmacc.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv4f64_f32_nxv4f32:
@@ -780,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmacc.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmacc_mask_vf_nxv8f64_f32_nxv8f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
index 884ee36575b4a..1e9d25d0884d3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwmsac.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float>  @intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double>  @intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwmsac.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f32_f16_nxv1f16:
@@ -458,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwmsac.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f32_f16_nxv2f16:
@@ -504,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwmsac.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f32_f16_nxv4f16:
@@ -550,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwmsac.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f32_f16_nxv8f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwmsac.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv16f32_f16_nxv16f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwmsac.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv1f64_f32_nxv1f32:
@@ -688,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwmsac.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv2f64_f32_nxv2f32:
@@ -734,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwmsac.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv4f64_f32_nxv4f32:
@@ -780,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwmsac.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwmsac_mask_vf_nxv8f64_f32_nxv8f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
index 4ccd0f8c55835..2542be6d4a813 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmacc.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f32_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f32_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f32_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f32_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float>  @intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv16f32_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv1f64_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv2f64_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv4f64_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double>  @intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vv_nxv8f64_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmacc.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f32_f16_nxv1f16:
@@ -458,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmacc.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f32_f16_nxv2f16:
@@ -504,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmacc.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f32_f16_nxv4f16:
@@ -550,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmacc.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f32_f16_nxv8f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmacc.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv16f32_f16_nxv16f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmacc.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv1f64_f32_nxv1f32:
@@ -688,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmacc.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv2f64_f32_nxv2f32:
@@ -734,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmacc.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv4f64_f32_nxv4f32:
@@ -780,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmacc.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmacc_mask_vf_nxv8f64_f32_nxv8f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
index 26fcb06d89167..ad05c7a68496e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfwnmsac.ll
@@ -30,7 +30,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.nxv1f16(
   <vscale x 1 x half>,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float>  @intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16(<vscale x 1 x float> %0, <vscale x 1 x half> %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f32_nxv1f16_nxv1f16:
@@ -44,7 +44,7 @@ entry:
     <vscale x 1 x half> %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -76,7 +76,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.nxv2f16(
   <vscale x 2 x half>,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float>  @intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f32_nxv2f16_nxv2f16:
@@ -90,7 +90,7 @@ entry:
     <vscale x 2 x half> %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -122,7 +122,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.nxv4f16(
   <vscale x 4 x half>,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float>  @intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16(<vscale x 4 x float> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f32_nxv4f16_nxv4f16:
@@ -136,7 +136,7 @@ entry:
     <vscale x 4 x half> %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -168,7 +168,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.nxv8f16(
   <vscale x 8 x half>,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float>  @intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16(<vscale x 8 x float> %0, <vscale x 8 x half> %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f32_nxv8f16_nxv8f16:
@@ -182,7 +182,7 @@ entry:
     <vscale x 8 x half> %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -214,7 +214,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.nxv16f16(
   <vscale x 16 x half>,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float>  @intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16(<vscale x 16 x float> %0, <vscale x 16 x half> %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv16f32_nxv16f16_nxv16f16:
@@ -228,7 +228,7 @@ entry:
     <vscale x 16 x half> %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -260,7 +260,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.nxv1f32(
   <vscale x 1 x float>,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double>  @intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv1f64_nxv1f32_nxv1f32:
@@ -274,7 +274,7 @@ entry:
     <vscale x 1 x float> %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -306,7 +306,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.nxv2f32(
   <vscale x 2 x float>,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double>  @intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32(<vscale x 2 x double> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv2f64_nxv2f32_nxv2f32:
@@ -320,7 +320,7 @@ entry:
     <vscale x 2 x float> %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -352,7 +352,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.nxv4f32(
   <vscale x 4 x float>,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double>  @intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32(<vscale x 4 x double> %0, <vscale x 4 x float> %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv4f64_nxv4f32_nxv4f32:
@@ -366,7 +366,7 @@ entry:
     <vscale x 4 x float> %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -398,7 +398,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.nxv8f32(
   <vscale x 8 x float>,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double>  @intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32(<vscale x 8 x double> %0, <vscale x 8 x float> %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vv_nxv8f64_nxv8f32_nxv8f32:
@@ -412,7 +412,7 @@ entry:
     <vscale x 8 x float> %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }
@@ -444,7 +444,7 @@ declare <vscale x 1 x float> @llvm.riscv.vfwnmsac.mask.nxv1f32.f16(
   half,
   <vscale x 1 x half>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x float> @intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16(<vscale x 1 x float> %0, half %1, <vscale x 1 x half> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f32_f16_nxv1f16:
@@ -458,7 +458,7 @@ entry:
     half %1,
     <vscale x 1 x half> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x float> %a
 }
@@ -490,7 +490,7 @@ declare <vscale x 2 x float> @llvm.riscv.vfwnmsac.mask.nxv2f32.f16(
   half,
   <vscale x 2 x half>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x float> @intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16(<vscale x 2 x float> %0, half %1, <vscale x 2 x half> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f32_f16_nxv2f16:
@@ -504,7 +504,7 @@ entry:
     half %1,
     <vscale x 2 x half> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x float> %a
 }
@@ -536,7 +536,7 @@ declare <vscale x 4 x float> @llvm.riscv.vfwnmsac.mask.nxv4f32.f16(
   half,
   <vscale x 4 x half>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x float> @intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16(<vscale x 4 x float> %0, half %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f32_f16_nxv4f16:
@@ -550,7 +550,7 @@ entry:
     half %1,
     <vscale x 4 x half> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x float> %a
 }
@@ -582,7 +582,7 @@ declare <vscale x 8 x float> @llvm.riscv.vfwnmsac.mask.nxv8f32.f16(
   half,
   <vscale x 8 x half>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x float> @intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16(<vscale x 8 x float> %0, half %1, <vscale x 8 x half> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f32_f16_nxv8f16:
@@ -596,7 +596,7 @@ entry:
     half %1,
     <vscale x 8 x half> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x float> %a
 }
@@ -628,7 +628,7 @@ declare <vscale x 16 x float> @llvm.riscv.vfwnmsac.mask.nxv16f32.f16(
   half,
   <vscale x 16 x half>,
   <vscale x 16 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 16 x float> @intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16(<vscale x 16 x float> %0, half %1, <vscale x 16 x half> %2, <vscale x 16 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv16f32_f16_nxv16f16:
@@ -642,7 +642,7 @@ entry:
     half %1,
     <vscale x 16 x half> %2,
     <vscale x 16 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 16 x float> %a
 }
@@ -674,7 +674,7 @@ declare <vscale x 1 x double> @llvm.riscv.vfwnmsac.mask.nxv1f64.f32(
   float,
   <vscale x 1 x float>,
   <vscale x 1 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 1 x double> @intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32(<vscale x 1 x double> %0, float %1, <vscale x 1 x float> %2, <vscale x 1 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv1f64_f32_nxv1f32:
@@ -688,7 +688,7 @@ entry:
     float %1,
     <vscale x 1 x float> %2,
     <vscale x 1 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 1 x double> %a
 }
@@ -720,7 +720,7 @@ declare <vscale x 2 x double> @llvm.riscv.vfwnmsac.mask.nxv2f64.f32(
   float,
   <vscale x 2 x float>,
   <vscale x 2 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 2 x double> @intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32(<vscale x 2 x double> %0, float %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv2f64_f32_nxv2f32:
@@ -734,7 +734,7 @@ entry:
     float %1,
     <vscale x 2 x float> %2,
     <vscale x 2 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 2 x double> %a
 }
@@ -766,7 +766,7 @@ declare <vscale x 4 x double> @llvm.riscv.vfwnmsac.mask.nxv4f64.f32(
   float,
   <vscale x 4 x float>,
   <vscale x 4 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 4 x double> @intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32(<vscale x 4 x double> %0, float %1, <vscale x 4 x float> %2, <vscale x 4 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv4f64_f32_nxv4f32:
@@ -780,7 +780,7 @@ entry:
     float %1,
     <vscale x 4 x float> %2,
     <vscale x 4 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 4 x double> %a
 }
@@ -812,7 +812,7 @@ declare <vscale x 8 x double> @llvm.riscv.vfwnmsac.mask.nxv8f64.f32(
   float,
   <vscale x 8 x float>,
   <vscale x 8 x i1>,
-  iXLen);
+  iXLen, iXLen);
 
 define <vscale x 8 x double> @intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32(<vscale x 8 x double> %0, float %1, <vscale x 8 x float> %2, <vscale x 8 x i1> %3, iXLen %4) nounwind {
 ; CHECK-LABEL: intrinsic_vfwnmsac_mask_vf_nxv8f64_f32_nxv8f32:
@@ -826,7 +826,7 @@ entry:
     float %1,
     <vscale x 8 x float> %2,
     <vscale x 8 x i1> %3,
-    iXLen %4)
+    iXLen %4, iXLen 0);
 
   ret <vscale x 8 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
index 3aeaa11d2ff94..fbdfac8c0cba8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1553,7 +1553,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1574,7 +1574,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1613,7 +1613,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1634,7 +1634,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1673,7 +1673,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1694,7 +1694,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
index cfb3a432b92f0..16ea00ad78959 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmacc-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8>  @intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8>  @intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8>  @intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8>  @intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8>  @intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8>  @intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmacc.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmacc.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmacc.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmacc.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmacc.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmacc.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmacc.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmacc.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmacc.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmacc.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmacc.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmacc.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmacc.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmacc.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmacc.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1546,7 +1546,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmacc.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1560,7 +1560,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1592,7 +1592,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmacc.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1606,7 +1606,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1638,7 +1638,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmacc.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmacc_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1652,7 +1652,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
index fa46e6e8932a4..e93f3bffe82a0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1553,7 +1553,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1574,7 +1574,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1613,7 +1613,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1634,7 +1634,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1673,7 +1673,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1694,7 +1694,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
index fc401f3cba819..63695f673f302 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmadd-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8>  @intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8>  @intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8>  @intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8>  @intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8>  @intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8>  @intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vmadd.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vmadd.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vmadd.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vmadd.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vmadd.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vmadd.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vmadd.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vmadd.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vmadd.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vmadd.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vmadd.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vmadd.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vmadd.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vmadd.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vmadd.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1546,7 +1546,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vmadd.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1560,7 +1560,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1592,7 +1592,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vmadd.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1606,7 +1606,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1638,7 +1638,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vmadd.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vmadd_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1652,7 +1652,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
index 7c1c3e6fcc4f0..45a6d986a2abc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1553,7 +1553,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1574,7 +1574,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1613,7 +1613,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1634,7 +1634,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1673,7 +1673,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1694,7 +1694,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
index f9ba1e3016eab..a7173a53263ad 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsac-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8>  @intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8>  @intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8>  @intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8>  @intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8>  @intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8>  @intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsac.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsac.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsac.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsac.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsac.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsac.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsac.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsac.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsac.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsac.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsac.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsac.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsac.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsac.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsac.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1546,7 +1546,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsac.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1560,7 +1560,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1592,7 +1592,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsac.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1606,7 +1606,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1638,7 +1638,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsac.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsac_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1652,7 +1652,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
index 3cf95ae313257..8440b0610872d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1553,7 +1553,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1574,7 +1574,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1613,7 +1613,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1634,7 +1634,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1673,7 +1673,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1694,7 +1694,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
index 6519e590aed38..b18086afca243 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vnmsub-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8>  @intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i8_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8>  @intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i8_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8>  @intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i8_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8>  @intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i8_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8>  @intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i8_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8>  @intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv32i8_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i16_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i16_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i16_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i16_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv16i16_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i32_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i32_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i32_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv8i32_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.nxv1i64(
   <vscale x 1 x i64>,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv1i64_nxv1i64_nxv1i64:
@@ -732,7 +732,7 @@ entry:
     <vscale x 1 x i64> %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.nxv2i64(
   <vscale x 2 x i64>,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv2i64_nxv2i64_nxv2i64:
@@ -778,7 +778,7 @@ entry:
     <vscale x 2 x i64> %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.nxv4i64(
   <vscale x 4 x i64>,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vv_nxv4i64_nxv4i64_nxv4i64:
@@ -824,7 +824,7 @@ entry:
     <vscale x 4 x i64> %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vnmsub.mask.nxv1i8.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i8_i8_nxv1i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vnmsub.mask.nxv2i8.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i8_i8_nxv2i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vnmsub.mask.nxv4i8.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i8_i8_nxv4i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vnmsub.mask.nxv8i8.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i8_i8_nxv8i8:
@@ -1008,7 +1008,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vnmsub.mask.nxv16i8.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i8_i8_nxv16i8:
@@ -1054,7 +1054,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vnmsub.mask.nxv32i8.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv32i8_i8_nxv32i8:
@@ -1100,7 +1100,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vnmsub.mask.nxv1i16.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i16_i16_nxv1i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vnmsub.mask.nxv2i16.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i16_i16_nxv2i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vnmsub.mask.nxv4i16.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i16_i16_nxv4i16:
@@ -1238,7 +1238,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vnmsub.mask.nxv8i16.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i16_i16_nxv8i16:
@@ -1284,7 +1284,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vnmsub.mask.nxv16i16.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv16i16_i16_nxv16i16:
@@ -1330,7 +1330,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vnmsub.mask.nxv1i32.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i32_i32_nxv1i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1408,7 +1408,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vnmsub.mask.nxv2i32.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i32_i32_nxv2i32:
@@ -1422,7 +1422,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1454,7 +1454,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vnmsub.mask.nxv4i32.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i32_i32_nxv4i32:
@@ -1468,7 +1468,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1500,7 +1500,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vnmsub.mask.nxv8i32.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv8i32_i32_nxv8i32:
@@ -1514,7 +1514,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1546,7 +1546,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vnmsub.mask.nxv1i64.i64(
   i64,
   <vscale x 1 x i64>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv1i64_i64_nxv1i64:
@@ -1560,7 +1560,7 @@ entry:
     i64 %1,
     <vscale x 1 x i64> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1592,7 +1592,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vnmsub.mask.nxv2i64.i64(
   i64,
   <vscale x 2 x i64>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv2i64_i64_nxv2i64:
@@ -1606,7 +1606,7 @@ entry:
     i64 %1,
     <vscale x 2 x i64> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1638,7 +1638,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vnmsub.mask.nxv4i64.i64(
   i64,
   <vscale x 4 x i64>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vnmsub_mask_vx_nxv4i64_i64_nxv4i64:
@@ -1652,7 +1652,7 @@ entry:
     i64 %1,
     <vscale x 4 x i64> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
index 4cd2fd28752ed..26a4cc73bccbd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
   <vscale x 1 x i8>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -75,7 +75,7 @@ entry:
     <vscale x 1 x i8> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -107,7 +107,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
   <vscale x 2 x i8>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
@@ -121,7 +121,7 @@ entry:
     <vscale x 2 x i8> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -154,7 +154,7 @@ entry:
     <vscale x 2 x i8> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -186,7 +186,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
   <vscale x 4 x i8>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
@@ -200,7 +200,7 @@ entry:
     <vscale x 4 x i8> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -233,7 +233,7 @@ entry:
     <vscale x 4 x i8> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -265,7 +265,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
   <vscale x 8 x i8>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
@@ -279,7 +279,7 @@ entry:
     <vscale x 8 x i8> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -312,7 +312,7 @@ entry:
     <vscale x 8 x i8> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -344,7 +344,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
   <vscale x 16 x i8>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
@@ -358,7 +358,7 @@ entry:
     <vscale x 16 x i8> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -391,7 +391,7 @@ entry:
     <vscale x 16 x i8> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -423,7 +423,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
   <vscale x 32 x i8>,
   i32,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
@@ -437,7 +437,7 @@ entry:
     <vscale x 32 x i8> %1,
     i32 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -470,7 +470,7 @@ entry:
     <vscale x 32 x i8> %1,
     i32 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,7 +502,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
   <vscale x 1 x i16>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
@@ -516,7 +516,7 @@ entry:
     <vscale x 1 x i16> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -549,7 +549,7 @@ entry:
     <vscale x 1 x i16> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -581,7 +581,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
   <vscale x 2 x i16>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
@@ -595,7 +595,7 @@ entry:
     <vscale x 2 x i16> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -628,7 +628,7 @@ entry:
     <vscale x 2 x i16> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -660,7 +660,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
   <vscale x 4 x i16>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
@@ -674,7 +674,7 @@ entry:
     <vscale x 4 x i16> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -707,7 +707,7 @@ entry:
     <vscale x 4 x i16> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -739,7 +739,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
   <vscale x 8 x i16>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
@@ -753,7 +753,7 @@ entry:
     <vscale x 8 x i16> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -786,7 +786,7 @@ entry:
     <vscale x 8 x i16> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -818,7 +818,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
   <vscale x 16 x i16>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
@@ -832,7 +832,7 @@ entry:
     <vscale x 16 x i16> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -865,7 +865,7 @@ entry:
     <vscale x 16 x i16> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -897,7 +897,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
@@ -911,7 +911,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -944,7 +944,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -976,7 +976,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
@@ -990,7 +990,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1023,7 +1023,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1055,7 +1055,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
@@ -1069,7 +1069,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1102,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1134,7 +1134,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
@@ -1148,7 +1148,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,7 +1181,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1213,7 +1213,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
@@ -1227,7 +1227,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1260,7 +1260,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1292,7 +1292,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
@@ -1306,7 +1306,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1339,7 +1339,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1371,7 +1371,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
@@ -1385,7 +1385,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1418,7 +1418,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
   <vscale x 1 x half>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
@@ -1464,7 +1464,7 @@ entry:
     <vscale x 1 x half> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1497,7 +1497,7 @@ entry:
     <vscale x 1 x half> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1529,7 +1529,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
   <vscale x 2 x half>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
@@ -1543,7 +1543,7 @@ entry:
     <vscale x 2 x half> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1576,7 +1576,7 @@ entry:
     <vscale x 2 x half> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
   <vscale x 4 x half>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
@@ -1622,7 +1622,7 @@ entry:
     <vscale x 4 x half> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1655,7 +1655,7 @@ entry:
     <vscale x 4 x half> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1687,7 +1687,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
   <vscale x 8 x half>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
@@ -1701,7 +1701,7 @@ entry:
     <vscale x 8 x half> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1734,7 +1734,7 @@ entry:
     <vscale x 8 x half> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1766,7 +1766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
   <vscale x 16 x half>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
@@ -1780,7 +1780,7 @@ entry:
     <vscale x 16 x half> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1813,7 +1813,7 @@ entry:
     <vscale x 16 x half> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1845,7 +1845,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
   <vscale x 1 x float>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
@@ -1859,7 +1859,7 @@ entry:
     <vscale x 1 x float> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1892,7 +1892,7 @@ entry:
     <vscale x 1 x float> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1924,7 +1924,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
   <vscale x 2 x float>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
@@ -1938,7 +1938,7 @@ entry:
     <vscale x 2 x float> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -1971,7 +1971,7 @@ entry:
     <vscale x 2 x float> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2003,7 +2003,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
   <vscale x 4 x float>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
@@ -2017,7 +2017,7 @@ entry:
     <vscale x 4 x float> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2050,7 +2050,7 @@ entry:
     <vscale x 4 x float> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2082,7 +2082,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
   <vscale x 8 x float>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
@@ -2096,7 +2096,7 @@ entry:
     <vscale x 8 x float> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2129,7 +2129,7 @@ entry:
     <vscale x 8 x float> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2161,7 +2161,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
   <vscale x 1 x double>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
@@ -2175,7 +2175,7 @@ entry:
     <vscale x 1 x double> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2208,7 +2208,7 @@ entry:
     <vscale x 1 x double> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2240,7 +2240,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
   <vscale x 2 x double>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
@@ -2254,7 +2254,7 @@ entry:
     <vscale x 2 x double> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2287,7 +2287,7 @@ entry:
     <vscale x 2 x double> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2319,7 +2319,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
   <vscale x 4 x double>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
@@ -2333,7 +2333,7 @@ entry:
     <vscale x 4 x double> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x double> %a
 }
@@ -2366,7 +2366,7 @@ entry:
     <vscale x 4 x double> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
index 863c955753152..4cfe039866a11 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslidedown-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
   <vscale x 1 x i8>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -75,7 +75,7 @@ entry:
     <vscale x 1 x i8> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -107,7 +107,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
   <vscale x 2 x i8>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
@@ -121,7 +121,7 @@ entry:
     <vscale x 2 x i8> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -154,7 +154,7 @@ entry:
     <vscale x 2 x i8> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -186,7 +186,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
   <vscale x 4 x i8>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
@@ -200,7 +200,7 @@ entry:
     <vscale x 4 x i8> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -233,7 +233,7 @@ entry:
     <vscale x 4 x i8> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -265,7 +265,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
   <vscale x 8 x i8>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
@@ -279,7 +279,7 @@ entry:
     <vscale x 8 x i8> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -312,7 +312,7 @@ entry:
     <vscale x 8 x i8> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -344,7 +344,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
   <vscale x 16 x i8>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
@@ -358,7 +358,7 @@ entry:
     <vscale x 16 x i8> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -391,7 +391,7 @@ entry:
     <vscale x 16 x i8> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -423,7 +423,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
   <vscale x 32 x i8>,
   i64,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
@@ -437,7 +437,7 @@ entry:
     <vscale x 32 x i8> %1,
     i64 %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -470,7 +470,7 @@ entry:
     <vscale x 32 x i8> %1,
     i64 9,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,7 +502,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
   <vscale x 1 x i16>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
@@ -516,7 +516,7 @@ entry:
     <vscale x 1 x i16> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -549,7 +549,7 @@ entry:
     <vscale x 1 x i16> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -581,7 +581,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
   <vscale x 2 x i16>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
@@ -595,7 +595,7 @@ entry:
     <vscale x 2 x i16> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -628,7 +628,7 @@ entry:
     <vscale x 2 x i16> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -660,7 +660,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
   <vscale x 4 x i16>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
@@ -674,7 +674,7 @@ entry:
     <vscale x 4 x i16> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -707,7 +707,7 @@ entry:
     <vscale x 4 x i16> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -739,7 +739,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
   <vscale x 8 x i16>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
@@ -753,7 +753,7 @@ entry:
     <vscale x 8 x i16> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -786,7 +786,7 @@ entry:
     <vscale x 8 x i16> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -818,7 +818,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
   <vscale x 16 x i16>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
@@ -832,7 +832,7 @@ entry:
     <vscale x 16 x i16> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -865,7 +865,7 @@ entry:
     <vscale x 16 x i16> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -897,7 +897,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
   <vscale x 1 x i32>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
@@ -911,7 +911,7 @@ entry:
     <vscale x 1 x i32> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -944,7 +944,7 @@ entry:
     <vscale x 1 x i32> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -976,7 +976,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
   <vscale x 2 x i32>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
@@ -990,7 +990,7 @@ entry:
     <vscale x 2 x i32> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1023,7 +1023,7 @@ entry:
     <vscale x 2 x i32> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1055,7 +1055,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
   <vscale x 4 x i32>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
@@ -1069,7 +1069,7 @@ entry:
     <vscale x 4 x i32> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1102,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1134,7 +1134,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
   <vscale x 8 x i32>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
@@ -1148,7 +1148,7 @@ entry:
     <vscale x 8 x i32> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,7 +1181,7 @@ entry:
     <vscale x 8 x i32> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1213,7 +1213,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
@@ -1227,7 +1227,7 @@ entry:
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1260,7 +1260,7 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1292,7 +1292,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
@@ -1306,7 +1306,7 @@ entry:
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1339,7 +1339,7 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1371,7 +1371,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
@@ -1385,7 +1385,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1418,7 +1418,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
   <vscale x 1 x half>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
@@ -1464,7 +1464,7 @@ entry:
     <vscale x 1 x half> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1497,7 +1497,7 @@ entry:
     <vscale x 1 x half> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1529,7 +1529,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
   <vscale x 2 x half>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
@@ -1543,7 +1543,7 @@ entry:
     <vscale x 2 x half> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1576,7 +1576,7 @@ entry:
     <vscale x 2 x half> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
   <vscale x 4 x half>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
@@ -1622,7 +1622,7 @@ entry:
     <vscale x 4 x half> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1655,7 +1655,7 @@ entry:
     <vscale x 4 x half> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1687,7 +1687,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
   <vscale x 8 x half>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
@@ -1701,7 +1701,7 @@ entry:
     <vscale x 8 x half> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1734,7 +1734,7 @@ entry:
     <vscale x 8 x half> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1766,7 +1766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
   <vscale x 16 x half>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
@@ -1780,7 +1780,7 @@ entry:
     <vscale x 16 x half> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1813,7 +1813,7 @@ entry:
     <vscale x 16 x half> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1845,7 +1845,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
   <vscale x 1 x float>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
@@ -1859,7 +1859,7 @@ entry:
     <vscale x 1 x float> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1892,7 +1892,7 @@ entry:
     <vscale x 1 x float> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1924,7 +1924,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
   <vscale x 2 x float>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
@@ -1938,7 +1938,7 @@ entry:
     <vscale x 2 x float> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -1971,7 +1971,7 @@ entry:
     <vscale x 2 x float> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2003,7 +2003,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
   <vscale x 4 x float>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
@@ -2017,7 +2017,7 @@ entry:
     <vscale x 4 x float> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2050,7 +2050,7 @@ entry:
     <vscale x 4 x float> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2082,7 +2082,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
   <vscale x 8 x float>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
@@ -2096,7 +2096,7 @@ entry:
     <vscale x 8 x float> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2129,7 +2129,7 @@ entry:
     <vscale x 8 x float> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2161,7 +2161,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
   <vscale x 1 x double>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
@@ -2175,7 +2175,7 @@ entry:
     <vscale x 1 x double> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2208,7 +2208,7 @@ entry:
     <vscale x 1 x double> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2240,7 +2240,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
   <vscale x 2 x double>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
@@ -2254,7 +2254,7 @@ entry:
     <vscale x 2 x double> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2287,7 +2287,7 @@ entry:
     <vscale x 2 x double> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2319,7 +2319,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
   <vscale x 4 x double>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
@@ -2333,7 +2333,7 @@ entry:
     <vscale x 4 x double> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x double> %a
 }
@@ -2366,7 +2366,7 @@ entry:
     <vscale x 4 x double> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
index 3cdb4afac9692..9704fbb93e710 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
   <vscale x 1 x i8>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -75,7 +75,7 @@ entry:
     <vscale x 1 x i8> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -107,7 +107,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
   <vscale x 2 x i8>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
@@ -121,7 +121,7 @@ entry:
     <vscale x 2 x i8> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -154,7 +154,7 @@ entry:
     <vscale x 2 x i8> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -186,7 +186,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
   <vscale x 4 x i8>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
@@ -200,7 +200,7 @@ entry:
     <vscale x 4 x i8> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -233,7 +233,7 @@ entry:
     <vscale x 4 x i8> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -265,7 +265,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
   <vscale x 8 x i8>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
@@ -279,7 +279,7 @@ entry:
     <vscale x 8 x i8> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -312,7 +312,7 @@ entry:
     <vscale x 8 x i8> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -344,7 +344,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
   <vscale x 16 x i8>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
@@ -358,7 +358,7 @@ entry:
     <vscale x 16 x i8> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -391,7 +391,7 @@ entry:
     <vscale x 16 x i8> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -423,7 +423,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
   <vscale x 32 x i8>,
   i32,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
@@ -437,7 +437,7 @@ entry:
     <vscale x 32 x i8> %1,
     i32 %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -470,7 +470,7 @@ entry:
     <vscale x 32 x i8> %1,
     i32 9,
     <vscale x 32 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,7 +502,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
   <vscale x 1 x i16>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
@@ -516,7 +516,7 @@ entry:
     <vscale x 1 x i16> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -549,7 +549,7 @@ entry:
     <vscale x 1 x i16> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -581,7 +581,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
   <vscale x 2 x i16>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
@@ -595,7 +595,7 @@ entry:
     <vscale x 2 x i16> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -628,7 +628,7 @@ entry:
     <vscale x 2 x i16> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -660,7 +660,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
   <vscale x 4 x i16>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
@@ -674,7 +674,7 @@ entry:
     <vscale x 4 x i16> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -707,7 +707,7 @@ entry:
     <vscale x 4 x i16> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -739,7 +739,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
   <vscale x 8 x i16>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
@@ -753,7 +753,7 @@ entry:
     <vscale x 8 x i16> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -786,7 +786,7 @@ entry:
     <vscale x 8 x i16> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -818,7 +818,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
   <vscale x 16 x i16>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
@@ -832,7 +832,7 @@ entry:
     <vscale x 16 x i16> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -865,7 +865,7 @@ entry:
     <vscale x 16 x i16> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -897,7 +897,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
   <vscale x 1 x i32>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
@@ -911,7 +911,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -944,7 +944,7 @@ entry:
     <vscale x 1 x i32> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -976,7 +976,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
   <vscale x 2 x i32>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
@@ -990,7 +990,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1023,7 +1023,7 @@ entry:
     <vscale x 2 x i32> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1055,7 +1055,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
   <vscale x 4 x i32>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
@@ -1069,7 +1069,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1102,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1134,7 +1134,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
   <vscale x 8 x i32>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
@@ -1148,7 +1148,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,7 +1181,7 @@ entry:
     <vscale x 8 x i32> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1213,7 +1213,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
   <vscale x 1 x i64>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
@@ -1227,7 +1227,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1260,7 +1260,7 @@ entry:
     <vscale x 1 x i64> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1292,7 +1292,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
   <vscale x 2 x i64>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
@@ -1306,7 +1306,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1339,7 +1339,7 @@ entry:
     <vscale x 2 x i64> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1371,7 +1371,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
   <vscale x 4 x i64>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
@@ -1385,7 +1385,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1418,7 +1418,7 @@ entry:
     <vscale x 4 x i64> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
   <vscale x 1 x half>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
@@ -1464,7 +1464,7 @@ entry:
     <vscale x 1 x half> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1497,7 +1497,7 @@ entry:
     <vscale x 1 x half> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1529,7 +1529,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
   <vscale x 2 x half>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
@@ -1543,7 +1543,7 @@ entry:
     <vscale x 2 x half> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1576,7 +1576,7 @@ entry:
     <vscale x 2 x half> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
   <vscale x 4 x half>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
@@ -1622,7 +1622,7 @@ entry:
     <vscale x 4 x half> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1655,7 +1655,7 @@ entry:
     <vscale x 4 x half> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1687,7 +1687,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
   <vscale x 8 x half>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
@@ -1701,7 +1701,7 @@ entry:
     <vscale x 8 x half> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1734,7 +1734,7 @@ entry:
     <vscale x 8 x half> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1766,7 +1766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
   <vscale x 16 x half>,
   i32,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16:
@@ -1780,7 +1780,7 @@ entry:
     <vscale x 16 x half> %1,
     i32 %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1813,7 +1813,7 @@ entry:
     <vscale x 16 x half> %1,
     i32 9,
     <vscale x 16 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1845,7 +1845,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
   <vscale x 1 x float>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32:
@@ -1859,7 +1859,7 @@ entry:
     <vscale x 1 x float> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1892,7 +1892,7 @@ entry:
     <vscale x 1 x float> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1924,7 +1924,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
   <vscale x 2 x float>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32:
@@ -1938,7 +1938,7 @@ entry:
     <vscale x 2 x float> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -1971,7 +1971,7 @@ entry:
     <vscale x 2 x float> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2003,7 +2003,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
   <vscale x 4 x float>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32:
@@ -2017,7 +2017,7 @@ entry:
     <vscale x 4 x float> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2050,7 +2050,7 @@ entry:
     <vscale x 4 x float> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2082,7 +2082,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
   <vscale x 8 x float>,
   i32,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32:
@@ -2096,7 +2096,7 @@ entry:
     <vscale x 8 x float> %1,
     i32 %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2129,7 +2129,7 @@ entry:
     <vscale x 8 x float> %1,
     i32 9,
     <vscale x 8 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2161,7 +2161,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
   <vscale x 1 x double>,
   i32,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64:
@@ -2175,7 +2175,7 @@ entry:
     <vscale x 1 x double> %1,
     i32 %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2208,7 +2208,7 @@ entry:
     <vscale x 1 x double> %1,
     i32 9,
     <vscale x 1 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2240,7 +2240,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
   <vscale x 2 x double>,
   i32,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64:
@@ -2254,7 +2254,7 @@ entry:
     <vscale x 2 x double> %1,
     i32 %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2287,7 +2287,7 @@ entry:
     <vscale x 2 x double> %1,
     i32 9,
     <vscale x 2 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2319,7 +2319,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
   <vscale x 4 x double>,
   i32,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64:
@@ -2333,7 +2333,7 @@ entry:
     <vscale x 4 x double> %1,
     i32 %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x double> %a
 }
@@ -2366,7 +2366,7 @@ entry:
     <vscale x 4 x double> %1,
     i32 9,
     <vscale x 4 x i1> %2,
-    i32 %3)
+    i32 %3, i32 0)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
index a9a166c1056e9..aa26b208f122b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vslideup-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8(
   <vscale x 1 x i8>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i8> @intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -75,7 +75,7 @@ entry:
     <vscale x 1 x i8> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i8> %a
 }
@@ -107,7 +107,7 @@ declare <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8(
   <vscale x 2 x i8>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i8> @intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i8_nxv2i8:
@@ -121,7 +121,7 @@ entry:
     <vscale x 2 x i8> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -154,7 +154,7 @@ entry:
     <vscale x 2 x i8> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i8> %a
 }
@@ -186,7 +186,7 @@ declare <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8(
   <vscale x 4 x i8>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i8> @intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i8_nxv4i8:
@@ -200,7 +200,7 @@ entry:
     <vscale x 4 x i8> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -233,7 +233,7 @@ entry:
     <vscale x 4 x i8> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i8> %a
 }
@@ -265,7 +265,7 @@ declare <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8(
   <vscale x 8 x i8>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i8> @intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i8_nxv8i8:
@@ -279,7 +279,7 @@ entry:
     <vscale x 8 x i8> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -312,7 +312,7 @@ entry:
     <vscale x 8 x i8> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i8> %a
 }
@@ -344,7 +344,7 @@ declare <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8(
   <vscale x 16 x i8>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i8> @intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i8_nxv16i8:
@@ -358,7 +358,7 @@ entry:
     <vscale x 16 x i8> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -391,7 +391,7 @@ entry:
     <vscale x 16 x i8> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x i8> %a
 }
@@ -423,7 +423,7 @@ declare <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8(
   <vscale x 32 x i8>,
   i64,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i8> @intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv32i8_nxv32i8:
@@ -437,7 +437,7 @@ entry:
     <vscale x 32 x i8> %1,
     i64 %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -470,7 +470,7 @@ entry:
     <vscale x 32 x i8> %1,
     i64 9,
     <vscale x 32 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 32 x i8> %a
 }
@@ -502,7 +502,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16(
   <vscale x 1 x i16>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i16_nxv1i16:
@@ -516,7 +516,7 @@ entry:
     <vscale x 1 x i16> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -549,7 +549,7 @@ entry:
     <vscale x 1 x i16> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -581,7 +581,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16(
   <vscale x 2 x i16>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i16_nxv2i16:
@@ -595,7 +595,7 @@ entry:
     <vscale x 2 x i16> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -628,7 +628,7 @@ entry:
     <vscale x 2 x i16> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -660,7 +660,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16(
   <vscale x 4 x i16>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i16_nxv4i16:
@@ -674,7 +674,7 @@ entry:
     <vscale x 4 x i16> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -707,7 +707,7 @@ entry:
     <vscale x 4 x i16> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -739,7 +739,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16(
   <vscale x 8 x i16>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i16_nxv8i16:
@@ -753,7 +753,7 @@ entry:
     <vscale x 8 x i16> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -786,7 +786,7 @@ entry:
     <vscale x 8 x i16> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -818,7 +818,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16(
   <vscale x 16 x i16>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16i16_nxv16i16:
@@ -832,7 +832,7 @@ entry:
     <vscale x 16 x i16> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -865,7 +865,7 @@ entry:
     <vscale x 16 x i16> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -897,7 +897,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32(
   <vscale x 1 x i32>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i32_nxv1i32:
@@ -911,7 +911,7 @@ entry:
     <vscale x 1 x i32> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -944,7 +944,7 @@ entry:
     <vscale x 1 x i32> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -976,7 +976,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32(
   <vscale x 2 x i32>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i32_nxv2i32:
@@ -990,7 +990,7 @@ entry:
     <vscale x 2 x i32> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1023,7 +1023,7 @@ entry:
     <vscale x 2 x i32> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1055,7 +1055,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32(
   <vscale x 4 x i32>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i32_nxv4i32:
@@ -1069,7 +1069,7 @@ entry:
     <vscale x 4 x i32> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1102,7 +1102,7 @@ entry:
     <vscale x 4 x i32> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1134,7 +1134,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32(
   <vscale x 8 x i32>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8i32_nxv8i32:
@@ -1148,7 +1148,7 @@ entry:
     <vscale x 8 x i32> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1181,7 +1181,7 @@ entry:
     <vscale x 8 x i32> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1213,7 +1213,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64(
   <vscale x 1 x i64>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1i64_nxv1i64:
@@ -1227,7 +1227,7 @@ entry:
     <vscale x 1 x i64> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1260,7 +1260,7 @@ entry:
     <vscale x 1 x i64> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1292,7 +1292,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64(
   <vscale x 2 x i64>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2i64_nxv2i64:
@@ -1306,7 +1306,7 @@ entry:
     <vscale x 2 x i64> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1339,7 +1339,7 @@ entry:
     <vscale x 2 x i64> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1371,7 +1371,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64(
   <vscale x 4 x i64>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4i64_nxv4i64:
@@ -1385,7 +1385,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1418,7 +1418,7 @@ entry:
     <vscale x 4 x i64> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1450,7 +1450,7 @@ declare <vscale x 1 x half> @llvm.riscv.vslideup.mask.nxv1f16(
   <vscale x 1 x half>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x half> @intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f16_nxv1f16:
@@ -1464,7 +1464,7 @@ entry:
     <vscale x 1 x half> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1497,7 +1497,7 @@ entry:
     <vscale x 1 x half> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x half> %a
 }
@@ -1529,7 +1529,7 @@ declare <vscale x 2 x half> @llvm.riscv.vslideup.mask.nxv2f16(
   <vscale x 2 x half>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x half> @intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f16_nxv2f16:
@@ -1543,7 +1543,7 @@ entry:
     <vscale x 2 x half> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1576,7 +1576,7 @@ entry:
     <vscale x 2 x half> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x half> %a
 }
@@ -1608,7 +1608,7 @@ declare <vscale x 4 x half> @llvm.riscv.vslideup.mask.nxv4f16(
   <vscale x 4 x half>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x half> @intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f16_nxv4f16:
@@ -1622,7 +1622,7 @@ entry:
     <vscale x 4 x half> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1655,7 +1655,7 @@ entry:
     <vscale x 4 x half> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x half> %a
 }
@@ -1687,7 +1687,7 @@ declare <vscale x 8 x half> @llvm.riscv.vslideup.mask.nxv8f16(
   <vscale x 8 x half>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x half> @intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f16_nxv8f16:
@@ -1701,7 +1701,7 @@ entry:
     <vscale x 8 x half> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1734,7 +1734,7 @@ entry:
     <vscale x 8 x half> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x half> %a
 }
@@ -1766,7 +1766,7 @@ declare <vscale x 16 x half> @llvm.riscv.vslideup.mask.nxv16f16(
   <vscale x 16 x half>,
   i64,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x half> @intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i64 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv16f16_nxv16f16:
@@ -1780,7 +1780,7 @@ entry:
     <vscale x 16 x half> %1,
     i64 %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1813,7 +1813,7 @@ entry:
     <vscale x 16 x half> %1,
     i64 9,
     <vscale x 16 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 16 x half> %a
 }
@@ -1845,7 +1845,7 @@ declare <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32(
   <vscale x 1 x float>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x float> @intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f32_nxv1f32:
@@ -1859,7 +1859,7 @@ entry:
     <vscale x 1 x float> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1892,7 +1892,7 @@ entry:
     <vscale x 1 x float> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x float> %a
 }
@@ -1924,7 +1924,7 @@ declare <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32(
   <vscale x 2 x float>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x float> @intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f32_nxv2f32:
@@ -1938,7 +1938,7 @@ entry:
     <vscale x 2 x float> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -1971,7 +1971,7 @@ entry:
     <vscale x 2 x float> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x float> %a
 }
@@ -2003,7 +2003,7 @@ declare <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32(
   <vscale x 4 x float>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x float> @intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f32_nxv4f32:
@@ -2017,7 +2017,7 @@ entry:
     <vscale x 4 x float> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2050,7 +2050,7 @@ entry:
     <vscale x 4 x float> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x float> %a
 }
@@ -2082,7 +2082,7 @@ declare <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32(
   <vscale x 8 x float>,
   i64,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x float> @intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i64 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv8f32_nxv8f32:
@@ -2096,7 +2096,7 @@ entry:
     <vscale x 8 x float> %1,
     i64 %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2129,7 +2129,7 @@ entry:
     <vscale x 8 x float> %1,
     i64 9,
     <vscale x 8 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 8 x float> %a
 }
@@ -2161,7 +2161,7 @@ declare <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64(
   <vscale x 1 x double>,
   i64,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x double> @intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv1f64_nxv1f64:
@@ -2175,7 +2175,7 @@ entry:
     <vscale x 1 x double> %1,
     i64 %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2208,7 +2208,7 @@ entry:
     <vscale x 1 x double> %1,
     i64 9,
     <vscale x 1 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 1 x double> %a
 }
@@ -2240,7 +2240,7 @@ declare <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64(
   <vscale x 2 x double>,
   i64,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x double> @intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv2f64_nxv2f64:
@@ -2254,7 +2254,7 @@ entry:
     <vscale x 2 x double> %1,
     i64 %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2287,7 +2287,7 @@ entry:
     <vscale x 2 x double> %1,
     i64 9,
     <vscale x 2 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 2 x double> %a
 }
@@ -2319,7 +2319,7 @@ declare <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64(
   <vscale x 4 x double>,
   i64,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x double> @intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vslideup_mask_vx_nxv4f64_nxv4f64:
@@ -2333,7 +2333,7 @@ entry:
     <vscale x 4 x double> %1,
     i64 %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x double> %a
 }
@@ -2366,7 +2366,7 @@ entry:
     <vscale x 4 x double> %1,
     i64 9,
     <vscale x 4 x i1> %2,
-    i64 %3)
+    i64 %3, i64 0)
 
   ret <vscale x 4 x double> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
index a68c26ba9180c..5fb3a63b137fb 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }

diff --git a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
index b3922ccb1ce43..2b547165170f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmacc-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16>  @intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32>  @intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64>  @intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16> @intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32> @intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64> @intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmacc_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
index 3b3ed3ce683ab..1522eb1e86b21 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
index 07f11b1ef1615..ab86a96db8bd0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccsu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16>  @intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32>  @intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64>  @intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccsu_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
index 4d7057a8f81a2..183fd36188a90 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
index 623e00c03aadb..49b2fec00220a 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccu-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     <vscale x 1 x i8> %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
   <vscale x 2 x i8>,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     <vscale x 2 x i8> %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
   <vscale x 4 x i8>,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     <vscale x 4 x i8> %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
   <vscale x 8 x i8>,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     <vscale x 8 x i8> %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
   <vscale x 16 x i8>,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     <vscale x 16 x i8> %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
   <vscale x 32 x i8>,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     <vscale x 32 x i8> %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
   <vscale x 1 x i16>,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     <vscale x 1 x i16> %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
   <vscale x 2 x i16>,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     <vscale x 2 x i16> %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
   <vscale x 4 x i16>,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     <vscale x 4 x i16> %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
   <vscale x 8 x i16>,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     <vscale x 8 x i16> %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
   <vscale x 16 x i16>,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     <vscale x 16 x i16> %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
   <vscale x 1 x i32>,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     <vscale x 1 x i32> %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
   <vscale x 2 x i32>,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     <vscale x 2 x i32> %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
   <vscale x 4 x i32>,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     <vscale x 4 x i32> %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
   <vscale x 8 x i32>,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     <vscale x 8 x i32> %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }
@@ -718,7 +718,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
@@ -732,7 +732,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -764,7 +764,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
@@ -778,7 +778,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -810,7 +810,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
@@ -824,7 +824,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -856,7 +856,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
@@ -870,7 +870,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -902,7 +902,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
@@ -916,7 +916,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -948,7 +948,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
@@ -962,7 +962,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -994,7 +994,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
@@ -1008,7 +1008,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -1040,7 +1040,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
@@ -1054,7 +1054,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -1086,7 +1086,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
@@ -1100,7 +1100,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -1132,7 +1132,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
@@ -1146,7 +1146,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -1178,7 +1178,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
@@ -1192,7 +1192,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -1224,7 +1224,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
@@ -1238,7 +1238,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -1270,7 +1270,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
@@ -1284,7 +1284,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -1316,7 +1316,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
@@ -1330,7 +1330,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -1362,7 +1362,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
@@ -1376,7 +1376,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
index 6a3aabba1be27..ccde867da07c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv32.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i32);
+  i32, i32);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i32 %4)
+    i32 %4, i32 0)
 
   ret <vscale x 8 x i64> %a
 }

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
index 6786754645fdc..95b164270ced5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vwmaccus-rv64.ll
@@ -28,7 +28,7 @@ declare <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8(
   i8,
   <vscale x 1 x i8>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i16> @intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i16_i8_nxv1i8:
@@ -42,7 +42,7 @@ entry:
     i8 %1,
     <vscale x 1 x i8> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i16> %a
 }
@@ -74,7 +74,7 @@ declare <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8(
   i8,
   <vscale x 2 x i8>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i16> @intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i16_i8_nxv2i8:
@@ -88,7 +88,7 @@ entry:
     i8 %1,
     <vscale x 2 x i8> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i16> %a
 }
@@ -120,7 +120,7 @@ declare <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8(
   i8,
   <vscale x 4 x i8>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i16> @intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i16_i8_nxv4i8:
@@ -134,7 +134,7 @@ entry:
     i8 %1,
     <vscale x 4 x i8> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i16> %a
 }
@@ -166,7 +166,7 @@ declare <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8(
   i8,
   <vscale x 8 x i8>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i16> @intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i16_i8_nxv8i8:
@@ -180,7 +180,7 @@ entry:
     i8 %1,
     <vscale x 8 x i8> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i16> %a
 }
@@ -212,7 +212,7 @@ declare <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8(
   i8,
   <vscale x 16 x i8>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i16> @intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i16_i8_nxv16i8:
@@ -226,7 +226,7 @@ entry:
     i8 %1,
     <vscale x 16 x i8> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i16> %a
 }
@@ -258,7 +258,7 @@ declare <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8(
   i8,
   <vscale x 32 x i8>,
   <vscale x 32 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 32 x i16> @intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv32i16_i8_nxv32i8:
@@ -272,7 +272,7 @@ entry:
     i8 %1,
     <vscale x 32 x i8> %2,
     <vscale x 32 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 32 x i16> %a
 }
@@ -304,7 +304,7 @@ declare <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16(
   i16,
   <vscale x 1 x i16>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i32> @intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i32_i16_nxv1i16:
@@ -318,7 +318,7 @@ entry:
     i16 %1,
     <vscale x 1 x i16> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i32> %a
 }
@@ -350,7 +350,7 @@ declare <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16(
   i16,
   <vscale x 2 x i16>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i32> @intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i32_i16_nxv2i16:
@@ -364,7 +364,7 @@ entry:
     i16 %1,
     <vscale x 2 x i16> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i32> %a
 }
@@ -396,7 +396,7 @@ declare <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16(
   i16,
   <vscale x 4 x i16>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i32> @intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i32_i16_nxv4i16:
@@ -410,7 +410,7 @@ entry:
     i16 %1,
     <vscale x 4 x i16> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i32> %a
 }
@@ -442,7 +442,7 @@ declare <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16(
   i16,
   <vscale x 8 x i16>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i32> @intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i32_i16_nxv8i16:
@@ -456,7 +456,7 @@ entry:
     i16 %1,
     <vscale x 8 x i16> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i32> %a
 }
@@ -488,7 +488,7 @@ declare <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16(
   i16,
   <vscale x 16 x i16>,
   <vscale x 16 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 16 x i32> @intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv16i32_i16_nxv16i16:
@@ -502,7 +502,7 @@ entry:
     i16 %1,
     <vscale x 16 x i16> %2,
     <vscale x 16 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 16 x i32> %a
 }
@@ -534,7 +534,7 @@ declare <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32(
   i32,
   <vscale x 1 x i32>,
   <vscale x 1 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 1 x i64> @intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv1i64_i32_nxv1i32:
@@ -548,7 +548,7 @@ entry:
     i32 %1,
     <vscale x 1 x i32> %2,
     <vscale x 1 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 1 x i64> %a
 }
@@ -580,7 +580,7 @@ declare <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32(
   i32,
   <vscale x 2 x i32>,
   <vscale x 2 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 2 x i64> @intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv2i64_i32_nxv2i32:
@@ -594,7 +594,7 @@ entry:
     i32 %1,
     <vscale x 2 x i32> %2,
     <vscale x 2 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 2 x i64> %a
 }
@@ -626,7 +626,7 @@ declare <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32(
   i32,
   <vscale x 4 x i32>,
   <vscale x 4 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 4 x i64> @intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv4i64_i32_nxv4i32:
@@ -640,7 +640,7 @@ entry:
     i32 %1,
     <vscale x 4 x i32> %2,
     <vscale x 4 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 4 x i64> %a
 }
@@ -672,7 +672,7 @@ declare <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32(
   i32,
   <vscale x 8 x i32>,
   <vscale x 8 x i1>,
-  i64);
+  i64, i64);
 
 define <vscale x 8 x i64> @intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
 ; CHECK-LABEL: intrinsic_vwmaccus_mask_vx_nxv8i64_i32_nxv8i32:
@@ -686,7 +686,7 @@ entry:
     i32 %1,
     <vscale x 8 x i32> %2,
     <vscale x 8 x i1> %3,
-    i64 %4)
+    i64 %4, i64 0)
 
   ret <vscale x 8 x i64> %a
 }
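
For reference, a minimal illustrative sketch (not part of the patch) of the masked intrinsic shape these tests now exercise, using the nxv1i16/nxv1i8 variant on RV64. Function and value names below are made up for illustration; the trailing i64 argument is the newly added policy operand, which the updated tests pass as 0:

  declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16>,
    <vscale x 1 x i8>,
    <vscale x 1 x i8>,
    <vscale x 1 x i1>,
    i64, i64)

  define <vscale x 1 x i16> @example_vwmaccu_mask(<vscale x 1 x i16> %acc, <vscale x 1 x i8> %a, <vscale x 1 x i8> %b, <vscale x 1 x i1> %mask, i64 %vl) nounwind {
  entry:
    ; The final i64 0 is the policy operand appended by this change.
    %r = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
      <vscale x 1 x i16> %acc,
      <vscale x 1 x i8> %a,
      <vscale x 1 x i8> %b,
      <vscale x 1 x i1> %mask,
      i64 %vl, i64 0)
    ret <vscale x 1 x i16> %r
  }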


        

