[clang] 224d29e - [1/7][Clang][RISCV] Remove default tail-undisturbed for vector reduction intrinsics

via cfe-commits cfe-commits at lists.llvm.org
Mon Jan 23 23:31:59 PST 2023


Author: eopXD
Date: 2023-01-23T23:31:50-08:00
New Revision: 224d29e6f54320c927d754b08d81237fc9ef5936

URL: https://github.com/llvm/llvm-project/commit/224d29e6f54320c927d754b08d81237fc9ef5936
DIFF: https://github.com/llvm/llvm-project/commit/224d29e6f54320c927d754b08d81237fc9ef5936.diff

LOG: [1/7][Clang][RISCV] Remove default tail-undisturbed for vector reduction intrinsics

The destination parameter is removed from the non-policy unmasked
intrinsics; they are now tail agnostic. The non-policy masked intrinsics
are tail agnostic and mask undisturbed, matching ordinary instructions
such as vadd.
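
As an illustration of the interface change, here is a sketch assembled
from the vfredmax tests updated below (the wrapper names reduce_unmasked
and reduce_masked are made up for this example; the intrinsic signatures
are the ones checked in the tests):

  #include <riscv_vector.h>

  // Unmasked form: the dest operand is gone; the result tail is agnostic.
  // Old call shape: vfredmax_vs_f16mf4_f16m1(dest, vector, scalar, vl);
  vfloat16m1_t reduce_unmasked(vfloat16mf4_t vector, vfloat16m1_t scalar,
                               size_t vl) {
    return vfredmax_vs_f16mf4_f16m1(vector, scalar, vl);
  }

  // Masked form: keeps its merge operand, now named maskedoff instead of
  // dest, in addition to the mask.
  vfloat16m1_t reduce_masked(vbool64_t mask, vfloat16m1_t maskedoff,
                             vfloat16mf4_t vector, vfloat16m1_t scalar,
                             size_t vl) {
    return vfredmax_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl);
  }

In the generated IR (see the CHECK lines below), the unmasked call now
passes poison as the passthru operand, while the masked call passes the
maskedoff argument.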

This is the 1st commit of a patch-set that aims to remove the
`IsPrototypeDefaultTU` special case for the rvv-intrinsics.

The patch-set works towards the simplification proposal [0] from Nick
Knight. The plan is that after this patch-set, all non-policy
intrinsics will follow the default policy behavior of tail agnostic
and mask undisturbed. A follow-up patch-set will then change the
non-policy intrinsics to tail agnostic and mask agnostic.

[0] https://gist.github.com/nick-knight/6cb0b74b351a25323dfb1821d3a269b9

Pull Request: riscv-non-isa/rvv-intrinsic-doc#186

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140895

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 6a68993544ac1..3bd463324db6b 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -297,6 +297,11 @@ multiclass RVVOp0BuiltinSet<string intrinsic_name, string type_range,
                             list<list<string>> suffixes_prototypes>
     : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [0]>;
 
+// IntrinsicTypes is output, op1 [-1, 0]
+multiclass RVVOutOp0BuiltinSet<string intrinsic_name, string type_range,
+                               list<list<string>> suffixes_prototypes>
+    : RVVBuiltinSet<intrinsic_name, type_range, suffixes_prototypes, [-1, 0]>;
+
 // IntrinsicTypes is output, op1 [-1, 1]
 multiclass RVVOutOp1BuiltinSet<string intrinsic_name, string type_range,
                                list<list<string>> suffixes_prototypes>
@@ -496,22 +501,22 @@ class RVVConvToNarrowingSignedBuiltin<string overloaded_name>
 class RVVConvToNarrowingUnsignedBuiltin<string overloaded_name>
     : RVVConvBuiltin<"Uv", "UvFw", "csi", overloaded_name>;
 
-let HasMaskedOffOperand = false in {
+let HasMaskedOffOperand = true in {
   multiclass RVVSignedReductionBuiltin {
-    defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
-                                  [["vs", "vSv", "SvSvvSv"]]>;
+    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+                                  [["vs", "vSv", "SvvSv"]]>;
   }
   multiclass RVVUnsignedReductionBuiltin {
-    defm "" : RVVOutOp1BuiltinSet<NAME, "csil",
-                                  [["vs", "UvUSv", "USvUSvUvUSv"]]>;
+    defm "" : RVVOutOp0BuiltinSet<NAME, "csil",
+                                  [["vs", "UvUSv", "USvUvUSv"]]>;
   }
   multiclass RVVFloatingReductionBuiltin {
-    defm "" : RVVOutOp1BuiltinSet<NAME, "xfd",
-                                  [["vs", "vSv", "SvSvvSv"]]>;
+    defm "" : RVVOutOp0BuiltinSet<NAME, "xfd",
+                                  [["vs", "vSv", "SvvSv"]]>;
   }
   multiclass RVVFloatingWidenReductionBuiltin {
-    defm "" : RVVOutOp1BuiltinSet<NAME, "xf",
-                                  [["vs", "vSw", "SwSwvSw"]]>;
+    defm "" : RVVOutOp0BuiltinSet<NAME, "xf",
+                                  [["vs", "vSw", "SwvSw"]]>;
   }
 }
 
@@ -2079,7 +2084,7 @@ let Log2LMUL = [-3, -2, -1, 0, 1, 2] in {
 // 15.1. Vector Single-Width Integer Reduction Instructions
 let UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = HasPassthruOperand,
-    IsPrototypeDefaultTU = true,
+    IsPrototypeDefaultTU = false,
     HasMaskPolicy = false in {
 defm vredsum : RVVIntReductionBuiltinSet;
 defm vredmaxu : RVVUnsignedReductionBuiltin;
@@ -2092,11 +2097,11 @@ defm vredxor : RVVIntReductionBuiltinSet;
 
 // 15.2. Vector Widening Integer Reduction Instructions
 // Vector Widening Integer Reduction Operations
-let HasMaskedOffOperand = false in {
-  defm vwredsum : RVVOutOp1BuiltinSet<"vwredsum", "csi",
-                                      [["vs", "vSw", "SwSwvSw"]]>;
-  defm vwredsumu : RVVOutOp1BuiltinSet<"vwredsumu", "csi",
-                                       [["vs", "UvUSw", "USwUSwUvUSw"]]>;
+let HasMaskedOffOperand = true in {
+  defm vwredsum : RVVOutOp0BuiltinSet<"vwredsum", "csi",
+                                      [["vs", "vSw", "SwvSw"]]>;
+  defm vwredsumu : RVVOutOp0BuiltinSet<"vwredsumu", "csi",
+                                       [["vs", "UvUSw", "USwUvUSw"]]>;
 }
 
 // 15.3. Vector Single-Width Floating-Point Reduction Instructions

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
index 614474b6836d4..79d4f05c9f0ad 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmax.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m8_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c
index 60aa64678ad30..945fa03593cd2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredmin.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m1_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m8_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c
index 9bf2d6b5f63e1..9d7ac81dfe797 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredosum.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m1_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m8_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c
index c96419943f183..94465a60d7602 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfredusum.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16mf4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16mf2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m1_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m1_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m2_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m2_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m4_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m4_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m8_f16m1(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m8_f16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16mf4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16mf2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16mf2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m1_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m1_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m2_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m2_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m4_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m4_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum_vs_f16m8_f16m1_m(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum_vs_f16m8_f16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum_vs_f32m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum_vs_f32m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum_vs_f64m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum_vs_f64m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

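For readers skimming the CHECK updates above, here is a minimal caller-side sketch of what the non-policy signature change looks like. It is illustrative only and not part of this patch; it assumes a <riscv_vector.h> generated from the post-patch riscv_vector.td and a target with the V extension enabled.

#include <riscv_vector.h>

// Post-patch non-policy unmasked reduction: no dest operand, tail agnostic,
// matching the poison merge operand in the updated CHECK lines above.
vfloat64m1_t sum_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
  return vfredusum_vs_f64m1_f64m1(vector, scalar, vl);
}

// The pre-patch call shape was vfredusum_vs_f64m1_f64m1(dest, vector, scalar, vl),
// which kept dest's tail elements undisturbed.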
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c
index 40d56e0666dae..ea0019c30ad34 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredosum.c
@@ -9,199 +9,199 @@
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32mf2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32mf2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32mf2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c
index e8f3dd9866c8a..821cc23ea3f55 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfwredusum.c
@@ -9,199 +9,199 @@
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16mf4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16mf2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m1_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m1_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m2_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m2_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m4_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m4_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m8_f32m1(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m8_f32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32mf2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32mf2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m1_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m1_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m2_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m2_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m4_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m4_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m8_f64m1(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m8_f64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16mf4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16mf2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16mf2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m1_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m1_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m2_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m2_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m4_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m4_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f16m8_f32m1_m(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f16m8_f32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32mf2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32mf2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m1_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m1_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m2_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m2_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m4_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m4_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum_vs_f32m8_f64m1_m(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum_vs_f32m8_f64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
index 7a85e62c78178..58ad2b62a3a6c 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredand.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
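
The vredand hunks above rename the merge operand of the masked tests from dest to maskedoff, while the vredmax hunks below drop the destination argument from the unmasked tests (the IR merge operand becomes poison). A minimal usage sketch of the updated non-policy signatures, not part of this commit, assuming <riscv_vector.h> and an RVV-enabled toolchain; the wrapper names reduce_max and reduce_and_m are illustrative only:

#include <riscv_vector.h>

// Unmasked non-policy form: no destination argument; the intrinsic is
// lowered with a poison merge operand, as in the CHECK lines below.
vint32m1_t reduce_max(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return vredmax_vs_i32m1_i32m1(vector, scalar, vl);
}

// Masked non-policy form: the merge operand is now spelled maskedoff.
vint32m1_t reduce_and_m(vbool32_t mask, vint32m1_t maskedoff,
                        vint32m1_t vector, vint32m1_t scalar, size_t vl) {
  return vredand_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
}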
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
index 0c5c35afadca7..266b45d5c3318 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmax.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c
index ec0826fb2f9b8..9b1fc33effb6f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmaxu.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c
index 96695c0d47c5b..d56732df9a97d 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredmin.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c
index 49f15a0a31a0f..df55d4f36b6e3 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredminu.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

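For the unmasked reductions above, the diff shows the interface change directly: the destination argument is dropped and the merge operand in the generated IR becomes poison, i.e. the result is computed tail agnostic. A small before/after sketch, with an illustrative wrapper name and assuming the post-patch prototypes shown in this file:

#include <riscv_vector.h>

// Before this patch (shown for comparison only):
//   vuint32m1_t r = vredminu_vs_u32m1_u32m1(dest, vector, scalar, vl);
// After this patch no destination operand is passed; codegen uses a
// poison merge operand, as in the CHECK-RV64 lines above.
vuint32m1_t reduce_minu_u32m1(vuint32m1_t vector, vuint32m1_t scalar,
                              size_t vl) {
  return vredminu_vs_u32m1_u32m1(vector, scalar, vl);
}
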
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c
index c0d69c6cd2d0e..fbc0c4ca6903b 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredor.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c
index 3b987a67847e8..c286d4d4ff2c1 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredsum.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

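For readers skimming the diff, here is a minimal usage sketch of the updated non-policy call shapes, taken directly from the vredsum tests above; the wrapper function names and the <riscv_vector.h> include are illustrative assumptions and not part of this patch:

#include <riscv_vector.h>

/* Unmasked non-policy form: the dest operand is removed and the result is
   tail agnostic (lowered with a poison passthrough operand). */
vuint64m1_t sum_u64m4(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum_vs_u64m4_u64m1(vector, scalar, vl);
}

/* Masked non-policy form: tail agnostic and mask undisturbed, so the former
   dest operand is now the maskedoff operand. */
vuint64m1_t sum_u64m4_m(vbool16_t mask, vuint64m1_t maskedoff,
                        vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
  return vredsum_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
}
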
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
index 7fa92039052eb..e392dae1f62bf 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vredxor.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m1_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m1_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m2_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m2_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m4_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m4_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m8_i8m1(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m8_i8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m1_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m1_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m2_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m2_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m4_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m4_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m8_u8m1(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m8_u8m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8mf2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8mf2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m1_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m1_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m2_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m2_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m4_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m4_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor_vs_i8m8_i8m1_m(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor_vs_i8m8_i8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor_vs_i16m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor_vs_i16m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor_vs_i32m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor_vs_i32m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor_vs_i64m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor_vs_i64m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8mf2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8mf2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m1_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m1_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m2_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m2_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m4_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m4_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor_vs_u8m8_u8m1_m(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor_vs_u8m8_u8m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor_vs_u16m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor_vs_u16m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor_vs_u32m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor_vs_u32m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor_vs_u64m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor_vs_u64m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c
index b1bf425ebd05b..496c0f4427264 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsum.c
@@ -8,325 +8,325 @@
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m1_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m1_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m2_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m2_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m4_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m4_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m8_i16m1(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m8_i16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16mf4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16mf2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m1_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m1_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m2_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m2_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m4_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m4_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m8_i32m1(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m8_i32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32mf2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m1_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m1_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m2_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m2_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m4_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m4_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m8_i64m1(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m8_i64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8mf2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8mf2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m1_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m2_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m2_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m4_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m4_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum_vs_i8m8_i16m1_m(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum_vs_i8m8_i16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16mf4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16mf2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16mf2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m1_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m1_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m2_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m2_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m4_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m4_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum_vs_i16m8_i32m1_m(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum_vs_i16m8_i32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32mf2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32mf2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m1_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m1_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m2_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m2_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m4_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m4_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum_vs_i32m8_i64m1_m(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum_vs_i32m8_i64m1_m(mask, maskedoff, vector, scalar, vl);
 }
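
// Caller-side sketch of the updated non-policy signatures exercised above
// (a minimal example, assuming compilation with the V extension enabled;
// the wrapper names sum_i8_to_i16 and sum_i8_to_i16_m are hypothetical and
// not part of this patch).
//
// #include <riscv_vector.h>
// #include <stddef.h>
//
// // The unmasked non-policy reduction no longer takes a dest operand and
// // is tail agnostic; the masked form keeps a merge operand, now spelled
// // maskedoff.
// vint16m1_t sum_i8_to_i16(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
//   // Previously: vwredsum_vs_i8m1_i16m1(dest, vector, scalar, vl)
//   return vwredsum_vs_i8m1_i16m1(vector, scalar, vl);
// }
//
// vint16m1_t sum_i8_to_i16_m(vbool8_t mask, vint16m1_t maskedoff,
//                            vint8m1_t vector, vint16m1_t scalar, size_t vl) {
//   return vwredsum_vs_i8m1_i16m1_m(mask, maskedoff, vector, scalar, vl);
// }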
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c
index a1c6a33c1ae78..a51c6b879d442 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vwredsumu.c
@@ -8,325 +8,325 @@
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m1_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m1_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m2_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m2_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m4_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m4_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m8_u16m1(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m8_u16m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16mf4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16mf2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m1_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m1_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m2_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m2_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m4_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m4_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m8_u32m1(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m8_u32m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32mf2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m1_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m1_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m2_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m2_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m4_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m4_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m8_u64m1(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m8_u64m1(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8mf2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8mf2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m1_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m2_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m2_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m4_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m4_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u8m8_u16m1_m(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u8m8_u16m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16mf4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16mf2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16mf2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m1_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m1_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m2_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m2_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m4_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m4_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u16m8_u32m1_m(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u16m8_u32m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32mf2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32mf2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m1_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m1_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m2_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m2_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m4_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m4_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu_vs_u32m8_u64m1_m(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu_vs_u32m8_u64m1_m(mask, maskedoff, vector, scalar, vl);
 }
 

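For reference, here is a minimal caller-side sketch of the signature change these tests cover, assuming the usual <riscv_vector.h> header is available; the sum_* wrapper names are hypothetical, while the intrinsic names and argument orders are taken directly from the diff above.

#include <riscv_vector.h>
#include <stddef.h>

// Illustrative sketch, not part of the patch.
// Non-policy unmasked form: the destination operand is gone; the IR above
// now passes poison as the passthrough operand.
vuint16m1_t sum_u8m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vwredsumu_vs_u8m1_u16m1(vector, scalar, vl); // was (dest, vector, scalar, vl)
}

// Non-policy masked form: the passthrough operand is now named maskedoff.
vuint16m1_t sum_u8m1_m(vbool8_t mask, vuint16m1_t maskedoff,
                       vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vwredsumu_vs_u8m1_u16m1_m(mask, maskedoff, vector, scalar, vl);
}
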
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
index 885dae5ddfbb4..901747a7befc6 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmax.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmax_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmax_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmax_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmax(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmax_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmax(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
index 38bd974c74095..e81a59288fb03 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredmin.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredmin.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredmin_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredmin.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredmin_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredmin_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredmin.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredmin(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredmin_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredmin(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
index bf220f6ba3203..bcf574a05243e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredosum.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredosum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredosum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredosum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredosum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredosum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredosum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredosum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredosum(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
index d51759df8a5fd..338fa0a6644d4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfredusum.c
@@ -9,271 +9,271 @@
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv1f16.i64(<vscale x 4 x half> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1(vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1(vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1(vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1(vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv16f16.i64(<vscale x 4 x half> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1(vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.nxv4f16.nxv32f16.i64(<vscale x 4 x half> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1(vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1(vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1(vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1(vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv8f32.i64(<vscale x 2 x float> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1(vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.nxv2f32.nxv16f32.i64(<vscale x 2 x float> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1(vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1(vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1(vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv4f64.i64(<vscale x 1 x double> poison, <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1(vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.nxv1f64.nxv8f64.i64(<vscale x 1 x double> poison, <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1(vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv1f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t dest, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf4_f16m1_m(vbool64_t mask, vfloat16m1_t maskedoff, vfloat16mf4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16mf2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t dest, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16mf2_f16m1_m(vbool32_t mask, vfloat16m1_t maskedoff, vfloat16mf2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m1_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m1_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m2_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t dest, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m2_f16m1_m(vbool8_t mask, vfloat16m1_t maskedoff, vfloat16m2_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m4_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv16f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t dest, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m4_f16m1_m(vbool4_t mask, vfloat16m1_t maskedoff, vfloat16m4_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f16m8_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfredusum.mask.nxv4f16.nxv32f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 4 x half> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t dest, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat16m1_t test_vfredusum_vs_f16m8_f16m1_m(vbool2_t mask, vfloat16m1_t maskedoff, vfloat16m8_t vector, vfloat16m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32mf2_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat32mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m1_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m2_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat32m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv8f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m4_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat32m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f32m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfredusum.mask.nxv2f32.nxv16f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfredusum_vs_f32m8_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat32m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m1_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m2_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat64m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv4f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m4_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat64m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfredusum_vs_f64m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfredusum.mask.nxv1f64.nxv8f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfredusum_vs_f64m8_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat64m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfredusum(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
index 716968a0b41e7..2c1730d6048b8 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredosum.c
@@ -9,199 +9,199 @@
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f16m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredosum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredosum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredosum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredosum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredosum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredosum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredosum(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
index 0ff93428fabcb..0634a79803b5f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfwredusum.c
@@ -9,199 +9,199 @@
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv1f16.i64(<vscale x 2 x float> poison, <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1(vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv2f16.i64(<vscale x 2 x float> poison, <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1(vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv4f16.i64(<vscale x 2 x float> poison, <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1(vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv8f16.i64(<vscale x 2 x float> poison, <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1(vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv16f16.i64(<vscale x 2 x float> poison, <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1(vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.nxv2f32.nxv32f16.i64(<vscale x 2 x float> poison, <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1(vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv1f32.i64(<vscale x 1 x double> poison, <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1(vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv2f32.i64(<vscale x 1 x double> poison, <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1(vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv4f32.i64(<vscale x 1 x double> poison, <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1(vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv8f32.i64(<vscale x 1 x double> poison, <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1(vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.nxv1f64.nxv16f32.i64(<vscale x 1 x double> poison, <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1(vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv1f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t dest, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf4_f32m1_m(vbool64_t mask, vfloat32m1_t maskedoff, vfloat16mf4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16mf2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv2f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16mf2_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat16mf2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m1_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv4f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t dest, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m1_f32m1_m(vbool16_t mask, vfloat32m1_t maskedoff, vfloat16m1_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m2_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv8f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t dest, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m2_f32m1_m(vbool8_t mask, vfloat32m1_t maskedoff, vfloat16m2_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m4_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv16f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t dest, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m4_f32m1_m(vbool4_t mask, vfloat32m1_t maskedoff, vfloat16m4_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f16m8_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfwredusum.mask.nxv2f32.nxv32f16.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[VECTOR:%.*]], <vscale x 2 x float> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t dest, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat32m1_t test_vfwredusum_vs_f16m8_f32m1_m(vbool2_t mask, vfloat32m1_t maskedoff, vfloat16m8_t vector, vfloat32m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32mf2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv1f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32mf2_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat32mf2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m1_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv2f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t dest, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m1_f64m1_m(vbool32_t mask, vfloat64m1_t maskedoff, vfloat32m1_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m2_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv4f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t dest, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m2_f64m1_m(vbool16_t mask, vfloat64m1_t maskedoff, vfloat32m2_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m4_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv8f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t dest, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m4_f64m1_m(vbool8_t mask, vfloat64m1_t maskedoff, vfloat32m4_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfwredusum_vs_f32m8_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfwredusum.mask.nxv1f64.nxv16f32.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[VECTOR:%.*]], <vscale x 1 x double> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t dest, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
-  return vfwredusum(mask, dest, vector, scalar, vl);
+vfloat64m1_t test_vfwredusum_vs_f32m8_f64m1_m(vbool4_t mask, vfloat64m1_t maskedoff, vfloat32m8_t vector, vfloat64m1_t scalar, size_t vl) {
+  return vfwredusum(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
index f93da0e65972b..2e8487f66ede2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredand.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredand_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredand_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredand_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredand_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredand_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredand_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredand_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredand_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredand(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredand_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredand(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
index d1022a222c9b3..7f08e8e329f67 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmax.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmax.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmax_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmax.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmax_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmax.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmax_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmax_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmax.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmax(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmax_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmax(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
index c9d69534b361a..525b65d805fdd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmaxu.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmaxu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredmaxu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmaxu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredmaxu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmaxu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredmaxu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmaxu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmaxu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredmaxu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredmaxu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredmaxu(mask, maskedoff, vector, scalar, vl);
 }
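
As a usage sketch only (assuming <riscv_vector.h> and the overloaded intrinsic signatures exactly as shown in the updated tests above, with this patch applied), a caller of the non-policy reduction now omits the destination operand, while the masked form passes a maskedoff operand in its place:

#include <riscv_vector.h>

// Unmasked overloaded form: no destination operand after this change.
vuint32m1_t max_u32(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
  return vredmaxu(vector, scalar, vl);
}

// Masked overloaded form: mask first, then the maskedoff operand.
vuint32m1_t max_u32_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector,
                      vuint32m1_t scalar, size_t vl) {
  return vredmaxu(mask, maskedoff, vector, scalar, vl);
}
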
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
index 8e08230a1ee20..0e6f763c991fd 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredmin.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredmin.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredmin_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredmin.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredmin_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredmin.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredmin_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredmin_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredmin.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredmin(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredmin_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredmin(mask, maskedoff, vector, scalar, vl);
 }
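
For reference, a minimal usage sketch of the non-policy reduction intrinsics after this change, assuming the overloaded names exercised by these tests; the wrapper functions below are hypothetical and not part of the patch. The unmasked form drops the destination operand (the passthru becomes poison, tail agnostic), while the masked form takes a maskedoff operand that supplies the mask-undisturbed value.

#include <riscv_vector.h>
#include <stddef.h>

// Hypothetical wrapper: unmasked non-policy reduction, no destination operand.
vint32m1_t reduce_min_i32(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return vredmin(vector, scalar, vl);
}

// Hypothetical wrapper: masked non-policy reduction, maskedoff provides the
// value kept for a false mask (mask undisturbed), matching the tests above.
vint32m1_t reduce_min_i32_m(vbool4_t mask, vint32m1_t maskedoff,
                            vint32m8_t vector, vint32m1_t scalar, size_t vl) {
  return vredmin(mask, maskedoff, vector, scalar, vl);
}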
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
index a9bef115b6711..b6762bc7f9811 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredminu.c
@@ -8,397 +8,397 @@
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredminu.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredminu_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredminu.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredminu_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredminu.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredminu_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredminu_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredminu.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredminu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredminu_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredminu(mask, maskedoff, vector, scalar, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
index 43f50cc99dbcb..d0ad6c445285f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredor.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredor(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
index d95c8f3b8d08d..91250bff7c24e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredsum.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredsum_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredsum_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredsum_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredsum_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredsum.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredsum_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredsum.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredsum_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredsum.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredsum_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredsum_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredsum.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredsum(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredsum_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredsum(mask, maskedoff, vector, scalar, vl);
 }
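
// The hunks above show the updated overloaded, non-policy signatures: the
// unmasked reductions drop the old dest argument (the IR merge operand is
// now poison, i.e. tail agnostic), while the masked reductions keep a merge
// argument spelled maskedoff. A minimal caller-side sketch under those
// signatures follows; the helper names sum_u32m8 / sum_u32m8_m and the
// parameter names src / init are illustrative only, not part of the
// intrinsic API (e.g. compile with clang --target=riscv64 -march=rv64gcv).

#include <riscv_vector.h>

// Unmasked form: no dest/merge argument; the destination tail is agnostic.
vuint32m1_t sum_u32m8(vuint32m8_t src, vuint32m1_t init, size_t vl) {
  return vredsum(src, init, vl);
}

// Masked form: the merge argument (maskedoff in the tests above) follows
// the mask; source elements whose mask bit is clear are excluded from the
// reduction.
vuint32m1_t sum_u32m8_m(vbool4_t mask, vuint32m1_t maskedoff,
                        vuint32m8_t src, vuint32m1_t init, size_t vl) {
  return vredsum(mask, maskedoff, src, init, vl);
}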
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
index d75f29413187c..f092e99819147 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vredxor.c
@@ -8,793 +8,793 @@
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1(vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1(vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1(vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1(vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1(vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1(vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1(vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1(vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1(vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1(vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1(vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1(vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1(vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1(vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1(vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1(vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1(vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1(vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1(vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1(vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1(vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1(vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1(vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1(vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1(vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1(vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1(vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1(vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1(vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1(vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1(vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1(vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1(vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1(vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1(vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1(vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1(vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1(vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1(vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1(vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1(vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1(vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> poison, <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1(vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> poison, <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1(vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t dest, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf8_i8m1_m(vbool64_t mask, vint8m1_t maskedoff, vint8mf8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t dest, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf4_i8m1_m(vbool32_t mask, vint8m1_t maskedoff, vint8mf4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8mf2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t dest, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8mf2_i8m1_m(vbool16_t mask, vint8m1_t maskedoff, vint8mf2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m1_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m1_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m2_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t dest, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m2_i8m1_m(vbool4_t mask, vint8m1_t maskedoff, vint8m2_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m4_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t dest, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m4_i8m1_m(vbool2_t mask, vint8m1_t maskedoff, vint8m4_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i8m8_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t dest, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint8m1_t test_vredxor_vs_i8m8_i8m1_m(vbool1_t mask, vint8m1_t maskedoff, vint8m8_t vector, vint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t dest, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf4_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint16mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t dest, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16mf2_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint16mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m1_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t dest, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m2_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint16m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t dest, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m4_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint16m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i16m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t dest, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint16m1_t test_vredxor_vs_i16m8_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint16m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t dest, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32mf2_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint32mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m1_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t dest, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m2_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint32m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t dest, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m4_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint32m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i32m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t dest, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint32m1_t test_vredxor_vs_i32m8_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint32m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m1_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t dest, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m2_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint64m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t dest, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m4_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint64m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_i64m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t dest, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vint64m1_t test_vredxor_vs_i64m8_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint64m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv1i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t dest, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf8_u8m1_m(vbool64_t mask, vuint8m1_t maskedoff, vuint8mf8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv2i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t dest, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf4_u8m1_m(vbool32_t mask, vuint8m1_t maskedoff, vuint8mf4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8mf2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t dest, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8mf2_u8m1_m(vbool16_t mask, vuint8m1_t maskedoff, vuint8mf2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m1_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m1_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m2_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t dest, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m2_u8m1_m(vbool4_t mask, vuint8m1_t maskedoff, vuint8m2_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m4_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv32i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t dest, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m4_u8m1_m(vbool2_t mask, vuint8m1_t maskedoff, vuint8m4_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u8m8_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vredxor.mask.nxv8i8.nxv64i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 8 x i8> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t dest, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint8m1_t test_vredxor_vs_u8m8_u8m1_m(vbool1_t mask, vuint8m1_t maskedoff, vuint8m8_t vector, vuint8m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv1i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf4_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint16mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16mf2_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint16mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m1_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m2_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint16m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv16i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m4_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint16m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u16m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vredxor.mask.nxv4i16.nxv32i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vredxor_vs_u16m8_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint16m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32mf2_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint32mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m1_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m2_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint32m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv8i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m4_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint32m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u32m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vredxor.mask.nxv2i32.nxv16i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vredxor_vs_u32m8_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint32m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m1_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m2_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint64m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv4i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m4_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint64m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vredxor_vs_u64m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vredxor.mask.nxv1i64.nxv8i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vredxor(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vredxor_vs_u64m8_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint64m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vredxor(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
index 60286aa40d61b..e3e1fca428b5e 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsum.c
@@ -8,325 +8,325 @@
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1(vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1(vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1(vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1(vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1(vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1(vint8m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1(vint8m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1(vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1(vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1(vint16m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1(vint16m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1(vint16m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1(vint16m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1(vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1(vint32m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1(vint32m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1(vint32m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1(vint32m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t dest, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf8_i16m1_m(vbool64_t mask, vint16m1_t maskedoff, vint8mf8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t dest, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf4_i16m1_m(vbool32_t mask, vint16m1_t maskedoff, vint8mf4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8mf2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t dest, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8mf2_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m1_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t dest, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m1_i16m1_m(vbool8_t mask, vint16m1_t maskedoff, vint8m1_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m2_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t dest, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m2_i16m1_m(vbool4_t mask, vint16m1_t maskedoff, vint8m2_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m4_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t dest, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m4_i16m1_m(vbool2_t mask, vint16m1_t maskedoff, vint8m4_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i8m8_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsum.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t dest, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint16m1_t test_vwredsum_vs_i8m8_i16m1_m(vbool1_t mask, vint16m1_t maskedoff, vint8m8_t vector, vint16m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t dest, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf4_i32m1_m(vbool64_t mask, vint32m1_t maskedoff, vint16mf4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16mf2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t dest, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16mf2_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m1_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t dest, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m1_i32m1_m(vbool16_t mask, vint32m1_t maskedoff, vint16m1_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m2_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t dest, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m2_i32m1_m(vbool8_t mask, vint32m1_t maskedoff, vint16m2_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m4_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t dest, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m4_i32m1_m(vbool4_t mask, vint32m1_t maskedoff, vint16m4_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i16m8_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsum.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t dest, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint32m1_t test_vwredsum_vs_i16m8_i32m1_m(vbool2_t mask, vint32m1_t maskedoff, vint16m8_t vector, vint32m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32mf2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t dest, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32mf2_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m1_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t dest, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m1_i64m1_m(vbool32_t mask, vint64m1_t maskedoff, vint32m1_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m2_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t dest, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m2_i64m1_m(vbool16_t mask, vint64m1_t maskedoff, vint32m2_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m4_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t dest, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m4_i64m1_m(vbool8_t mask, vint64m1_t maskedoff, vint32m4_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsum_vs_i32m8_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsum.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t dest, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
-  return vwredsum(mask, dest, vector, scalar, vl);
+vint64m1_t test_vwredsum_vs_i32m8_i64m1_m(vbool4_t mask, vint64m1_t maskedoff, vint32m8_t vector, vint64m1_t scalar, size_t vl) {
+  return vwredsum(mask, maskedoff, vector, scalar, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
index c2b42d3ae63d1..4a3bcc28e4081 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vwredsumu.c
@@ -8,325 +8,325 @@
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> poison, <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1(vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> poison, <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1(vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> poison, <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1(vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> poison, <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> poison, <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1(vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> poison, <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1(vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> poison, <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1(vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> poison, <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1(vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> poison, <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1(vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> poison, <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1(vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> poison, <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1(vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> poison, <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1(vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> poison, <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1(vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> poison, <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1(vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> poison, <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1(vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> poison, <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1(vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> poison, <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1(vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> poison, <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1(vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv1i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t dest, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf8_u16m1_m(vbool64_t mask, vuint16m1_t maskedoff, vuint8mf8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv2i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t dest, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf4_u16m1_m(vbool32_t mask, vuint16m1_t maskedoff, vuint8mf4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8mf2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8mf2_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m1_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv8i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t dest, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m1_u16m1_m(vbool8_t mask, vuint16m1_t maskedoff, vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m2_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv16i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t dest, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m2_u16m1_m(vbool4_t mask, vuint16m1_t maskedoff, vuint8m2_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m4_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv32i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t dest, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m4_u16m1_m(vbool2_t mask, vuint16m1_t maskedoff, vuint8m4_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u8m8_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwredsumu.mask.nxv4i16.nxv64i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[VECTOR:%.*]], <vscale x 4 x i16> [[SCALAR:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t dest, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint16m1_t test_vwredsumu_vs_u8m8_u16m1_m(vbool1_t mask, vuint16m1_t maskedoff, vuint8m8_t vector, vuint16m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv1i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t dest, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf4_u32m1_m(vbool64_t mask, vuint32m1_t maskedoff, vuint16mf4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16mf2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16mf2_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m1_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv4i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t dest, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m1_u32m1_m(vbool16_t mask, vuint32m1_t maskedoff, vuint16m1_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m2_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv8i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t dest, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m2_u32m1_m(vbool8_t mask, vuint32m1_t maskedoff, vuint16m2_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m4_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv16i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t dest, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m4_u32m1_m(vbool4_t mask, vuint32m1_t maskedoff, vuint16m4_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u16m8_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwredsumu.mask.nxv2i32.nxv32i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[VECTOR:%.*]], <vscale x 2 x i32> [[SCALAR:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t dest, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint32m1_t test_vwredsumu_vs_u16m8_u32m1_m(vbool2_t mask, vuint32m1_t maskedoff, vuint16m8_t vector, vuint32m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32mf2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32mf2_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m1_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv2i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t dest, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m1_u64m1_m(vbool32_t mask, vuint64m1_t maskedoff, vuint32m1_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m2_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv4i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t dest, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m2_u64m1_m(vbool16_t mask, vuint64m1_t maskedoff, vuint32m2_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m4_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv8i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t dest, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m4_u64m1_m(vbool8_t mask, vuint64m1_t maskedoff, vuint32m4_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vwredsumu_vs_u32m8_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwredsumu.mask.nxv1i64.nxv16i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[VECTOR:%.*]], <vscale x 1 x i64> [[SCALAR:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t dest, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
-  return vwredsumu(mask, dest, vector, scalar, vl);
+vuint64m1_t test_vwredsumu_vs_u32m8_u64m1_m(vbool4_t mask, vuint64m1_t maskedoff, vuint32m8_t vector, vuint64m1_t scalar, size_t vl) {
+  return vwredsumu(mask, maskedoff, vector, scalar, vl);
 }
 
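For readers skimming the diff, here is a minimal usage sketch that mirrors the updated overloaded vwredsumu tests above. The wrapper function names are hypothetical, and it assumes a toolchain that provides <riscv_vector.h> with the V extension enabled; the calls themselves are taken directly from the tests in this patch: the non-policy unmasked form no longer takes a destination operand, while the masked form keeps a maskedoff operand.

#include <riscv_vector.h>

/* Hypothetical wrappers illustrating the new calling convention. */
vuint16m1_t sum_u8m1(vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vwredsumu(vector, scalar, vl);            /* unmasked: no dest operand */
}

vuint16m1_t sum_u8m1_m(vbool8_t mask, vuint16m1_t maskedoff,
                       vuint8m1_t vector, vuint16m1_t scalar, size_t vl) {
  return vwredsumu(mask, maskedoff, vector, scalar, vl);  /* masked: maskedoff kept */
}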


        

