[clang] 42eae9a - [3/7][Clang][RISCV] Remove default tail-undisturbed for vslidedown intrinsics

via cfe-commits <cfe-commits at lists.llvm.org>
Mon Jan 23 23:39:24 PST 2023


Author: eopXD
Date: 2023-01-23T23:39:18-08:00
New Revision: 42eae9ad5fb40049ed09ef98a9a588e36a486f3b

URL: https://github.com/llvm/llvm-project/commit/42eae9ad5fb40049ed09ef98a9a588e36a486f3b
DIFF: https://github.com/llvm/llvm-project/commit/42eae9ad5fb40049ed09ef98a9a588e36a486f3b.diff

LOG: [3/7][Clang][RISCV] Remove default tail-undisturbed for vslidedown intrinsics

The destination parameter is removed from the non-policy unmasked vslidedown
intrinsics.

The default policy for the non-policy (implicit policy) vslidedown intrinsics
is now tail agnostic and mask undisturbed.
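
For example, a minimal before/after sketch of the unmasked, non-policy
intrinsic (the _tu spelling of the explicit-policy variant below is assumed
from the policy tests updated in this patch):

  // Before: the unmasked intrinsic carried a dest operand and was
  // implicitly tail undisturbed.
  vint32m1_t r0 = vslidedown_vx_i32m1(dest, src, offset, vl);

  // After: no dest operand; the result is tail agnostic.
  vint32m1_t r1 = vslidedown_vx_i32m1(src, offset, vl);

  // Tail-undisturbed behavior remains available through the explicit
  // policy variant, which keeps the dest (merge) operand.
  vint32m1_t r2 = vslidedown_vx_i32m1_tu(dest, src, offset, vl);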

This is the 3rd commit of a patch-set that aims to remove the
IsPrototypeDefaultTU special case for the rvv-intrinsics.

Please refer to the cover letter in the 1st commit (D140895) for an
overview.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140937

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 623cace677c02..e5ec4e048a917 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -468,14 +468,26 @@ let UnMaskedPolicyScheme = HasPolicyOperand,
   }
 }
 
-let UnMaskedPolicyScheme = HasPolicyOperand,
-    HasMaskedOffOperand = false,
-    IsPrototypeDefaultTU = true in {
+let UnMaskedPolicyScheme = HasPassthruOperand,
+    IsPrototypeDefaultTU = false,
+    ManualCodegen = [{
+      if (IsMasked) {
+        std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
+        if (PolicyAttrs == TAIL_AGNOSTIC_MASK_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+      } else {
+        if (PolicyAttrs == TAIL_AGNOSTIC)
+          Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
+      }
+
+      Ops.push_back(ConstantInt::get(Ops.back()->getType(), PolicyAttrs));
+      IntrinsicTypes = {ResultType, Ops.back()->getType()};
+    }] in {
   multiclass RVVSlideDownBuiltinSet {
     defm "" : RVVOutBuiltinSet<NAME, "csilxfd",
-                               [["vx","v", "vvvz"]]>;
+                               [["vx","v", "vvz"]]>;
     defm "" : RVVOutBuiltinSet<NAME, "csil",
-                               [["vx","Uv", "UvUvUvz"]]>;
+                               [["vx","Uv", "UvUvz"]]>;
   }
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c
index a8f9d13cf71d7..9d0922971a732 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vslidedown.c
@@ -9,1063 +9,1063 @@
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf4(dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf2(dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m1(dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m2(dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m4(dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m8(dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32mf2(dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m1(dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m2(dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m4(dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m8(dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m1(dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m2(dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m4(dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m8(dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf8(dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf4(dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf2(dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m1(dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m2(dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m4(dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m8(dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf4(dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf2(dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m1(dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m2(dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m4(dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m8(dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32mf2(dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m1(dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m2(dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m4(dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m8(dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m1(dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m2(dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m4(dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m8(dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf8(dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf4(dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf2(dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m1(dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m2(dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m4(dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m8(dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf4(dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf2(dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m1(dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m2(dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m4(dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m8(dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32mf2(dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32mf2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m1(dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m2(dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m4(dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m8(dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m1(dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m2(dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m4(dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m8(dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf4_m(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf2_m(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m1_m(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m2_m(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m4_m(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m8_m(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32mf2_m(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m1_m(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m2_m(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m4_m(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m8_m(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m1_m(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m2_m(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m4_m(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m8_m(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf8_m(mask, dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf4_m(mask, dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf2_m(mask, dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m1_m(mask, dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m2_m(mask, dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m4_m(mask, dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m8_m(mask, dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf4_m(mask, dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf2_m(mask, dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m1_m(mask, dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m2_m(mask, dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m4_m(mask, dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m8_m(mask, dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32mf2_m(mask, dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m1_m(mask, dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m2_m(mask, dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m4_m(mask, dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m8_m(mask, dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m1_m(mask, dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m2_m(mask, dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m4_m(mask, dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m8_m(mask, dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf8_m(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf4_m(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf2_m(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m1_m(mask, dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m2_m(mask, dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m4_m(mask, dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m8_m(mask, dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf4_m(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf2_m(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m1_m(mask, dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m2_m(mask, dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m4_m(mask, dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m8_m(mask, dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32mf2_m(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32mf2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m1_m(mask, dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m2_m(mask, dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m4_m(mask, dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m8_m(mask, dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m1_m(mask, dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m2_m(mask, dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m4_m(mask, dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4_m(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m8_m(mask, dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8_m(mask, maskedoff, src, offset, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
index 994b9d2ec628b..d7bcf0a137030 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vslidedown.c
@@ -9,1063 +9,1063 @@
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4(vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2(vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1(vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2(vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4(vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8(vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_m(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_m(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_m(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_m(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_m(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_m(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown(mask, dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown(mask, maskedoff, src, offset, vl);
 }
 

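A minimal usage sketch (not part of this patch) of the masked, non-policy overload exercised by the tests above. It assumes a Clang with RVV intrinsic support (e.g. built with -march=rv64gcv) and <riscv_vector.h>; the wrapper name is made up purely for illustration.

#include <stddef.h>
#include <riscv_vector.h>

// Hypothetical wrapper around the masked, non-policy vslidedown overload
// shown in the tests above: elements where `mask` is 0 keep the value
// from `maskedoff` (mask-undisturbed by default).
vint32m1_t shift_down_masked(vbool32_t mask, vint32m1_t maskedoff,
                             vint32m1_t src, size_t offset, size_t vl) {
  return vslidedown(mask, maskedoff, src, offset, vl);
}
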
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c
index 4e36f5feb4a13..327bdc6b86fe5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vslidedown.c
@@ -540,533 +540,533 @@ vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf4_ta(dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf2_ta(dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m1_ta(dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m2_ta(dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m4_ta(dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m8_ta(dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32mf2_ta(dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m1_ta(dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_ta(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m2_ta(dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_ta(vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m4_ta(dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m8_ta(dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m1_ta(dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m2_ta(dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m4_ta(dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m8_ta(dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_ta(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf8_ta(dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_ta(vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf4_ta(dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf2_ta(dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m1_ta(dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m2_ta(dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m4_ta(dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m8_ta(dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf4_ta(dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf2_ta(dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m1_ta(dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m2_ta(dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m4_ta(dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m8_ta(dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32mf2_ta(dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m1_ta(dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m2_ta(dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m4_ta(dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_ta(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m8_ta(dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_ta(vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m1_ta(dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m2_ta(dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m4_ta(dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m8_ta(dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf8_ta(dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf4_ta(dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf2_ta(dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m1_ta(dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m2_ta(dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m4_ta(dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m8_ta(dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf4_ta(dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf2_ta(dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m1_ta(dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m2_ta(dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m4_ta(dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m8_ta(dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32mf2_ta(dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32mf2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m1_ta(dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m2_ta(dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m4_ta(dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m8_ta(dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m1_ta(dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m2_ta(dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m4_ta(dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m8_ta(dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tuma(
@@ -2133,533 +2133,533 @@ vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_tama(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf4_tama(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tama(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_tama(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16mf2_tama(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tama(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_tama(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m1_tama(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_tama(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_tama(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m2_tama(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_tama(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_tama(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m4_tama(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_tama(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_tama(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f16m8_tama(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_tama(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f16m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32mf2_tama(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_tama(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m1_tama(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_tama(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_tama(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m2_tama(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_tama(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_tama(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m4_tama(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_tama(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_tama(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f32m8_tama(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_tama(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f32m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_tama(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m1_tama(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_tama(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_tama(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m2_tama(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_tama(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_tama(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m4_tama(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_tama(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_tama(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_f64m8_tama(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_tama(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_f64m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf8_tama(mask, dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf4_tama(mask, dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8mf2_tama(mask, dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_tama(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m1_tama(mask, dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_tama(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_tama(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m2_tama(mask, dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_tama(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_tama(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m4_tama(mask, dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_tama(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_tama(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i8m8_tama(mask, dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_tama(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i8m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf4_tama(mask, dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16mf2_tama(mask, dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_tama(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m1_tama(mask, dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_tama(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_tama(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m2_tama(mask, dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_tama(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_tama(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m4_tama(mask, dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_tama(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_tama(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i16m8_tama(mask, dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_tama(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i16m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32mf2_tama(mask, dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_tama(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m1_tama(mask, dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_tama(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_tama(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m2_tama(mask, dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_tama(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_tama(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m4_tama(mask, dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_tama(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_tama(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i32m8_tama(mask, dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_tama(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i32m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_tama(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m1_tama(mask, dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_tama(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_tama(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m2_tama(mask, dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_tama(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_tama(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m4_tama(mask, dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_tama(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_tama(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_i64m8_tama(mask, dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_tama(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_i64m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf8_tama(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf4_tama(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8mf2_tama(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_tama(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m1_tama(mask, dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_tama(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_tama(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m2_tama(mask, dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_tama(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_tama(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m4_tama(mask, dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_tama(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_tama(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u8m8_tama(mask, dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_tama(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u8m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf4_tama(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16mf2_tama(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_tama(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m1_tama(mask, dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_tama(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_tama(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m2_tama(mask, dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_tama(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_tama(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m4_tama(mask, dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_tama(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_tama(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u16m8_tama(mask, dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_tama(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u16m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32mf2_tama(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32mf2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_tama(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m1_tama(mask, dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_tama(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_tama(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m2_tama(mask, dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_tama(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_tama(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m4_tama(mask, dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_tama(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_tama(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u32m8_tama(mask, dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_tama(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u32m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_tama(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m1_tama(mask, dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_tama(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m1_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_tama(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m2_tama(mask, dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_tama(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m2_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_tama(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m4_tama(mask, dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_tama(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m4_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_tama(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_vx_u64m8_tama(mask, dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_tama(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_vx_u64m8_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tamu(

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
index 966474aaac539..99cebec2583e0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vslidedown.c
@@ -540,533 +540,533 @@ vuint64m8_t test_vslidedown_vx_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_ta(vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_ta(vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_ta(vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_ta(vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_ta(vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_ta(vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_ta(vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_ta(vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_ta(vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_ta(vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_ta(vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_ta(vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_ta(vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_ta(vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_ta(vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_ta(vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_ta(vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_ta(vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_ta(vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_ta(vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_ta(vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_ta(vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_ta(vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_ta(vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_ta(vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_ta(vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_ta(vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_ta(vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_ta(vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_ta(vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_ta(vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_ta(vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_ta(vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_ta(vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_ta(vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_ta(vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_ta(vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_ta(vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_ta(vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_ta(vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_ta(vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_ta(vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_ta(vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_ta(vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_ta(vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_ta(vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_ta(vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_ta(vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_ta(vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_ta(vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_ta(vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_ta(vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_ta(vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_ta(vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_ta(vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_ta(vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_ta(vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_ta(vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_ta(vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_ta(vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_ta(vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_ta(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]], i64 1)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_ta(dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_ta(vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_ta(src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tuma(
@@ -2133,533 +2133,533 @@ vuint64m8_t test_vslidedown_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t maskedoff,
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vslidedown_vx_f16mf4_tama(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16mf4_t test_vslidedown_vx_f16mf4_tama(vbool64_t mask, vfloat16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vslidedown_vx_f16mf2_tama(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16mf2_t test_vslidedown_vx_f16mf2_tama(vbool32_t mask, vfloat16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vslidedown_vx_f16m1_tama(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16m1_t test_vslidedown_vx_f16m1_tama(vbool16_t mask, vfloat16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vslidedown_vx_f16m2_tama(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16m2_t test_vslidedown_vx_f16m2_tama(vbool8_t mask, vfloat16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vslidedown_vx_f16m4_tama(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16m4_t test_vslidedown_vx_f16m4_tama(vbool4_t mask, vfloat16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vslidedown.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vslidedown_vx_f16m8_tama(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat16m8_t test_vslidedown_vx_f16m8_tama(vbool2_t mask, vfloat16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat32mf2_t test_vslidedown_vx_f32mf2_tama(vbool64_t mask, vfloat32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vslidedown_vx_f32m1_tama(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat32m1_t test_vslidedown_vx_f32m1_tama(vbool32_t mask, vfloat32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vslidedown_vx_f32m2_tama(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat32m2_t test_vslidedown_vx_f32m2_tama(vbool16_t mask, vfloat32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vslidedown_vx_f32m4_tama(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat32m4_t test_vslidedown_vx_f32m4_tama(vbool8_t mask, vfloat32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vslidedown_vx_f32m8_tama(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat32m8_t test_vslidedown_vx_f32m8_tama(vbool4_t mask, vfloat32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vslidedown_vx_f64m1_tama(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat64m1_t test_vslidedown_vx_f64m1_tama(vbool64_t mask, vfloat64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vslidedown_vx_f64m2_tama(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat64m2_t test_vslidedown_vx_f64m2_tama(vbool32_t mask, vfloat64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vslidedown_vx_f64m4_tama(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat64m4_t test_vslidedown_vx_f64m4_tama(vbool16_t mask, vfloat64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vslidedown_vx_f64m8_tama(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vfloat64m8_t test_vslidedown_vx_f64m8_tama(vbool8_t mask, vfloat64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vslidedown_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8mf8_t test_vslidedown_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vslidedown_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8mf4_t test_vslidedown_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vslidedown_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8mf2_t test_vslidedown_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vslidedown_vx_i8m1_tama(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8m1_t test_vslidedown_vx_i8m1_tama(vbool8_t mask, vint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vslidedown_vx_i8m2_tama(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8m2_t test_vslidedown_vx_i8m2_tama(vbool4_t mask, vint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vslidedown_vx_i8m4_tama(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8m4_t test_vslidedown_vx_i8m4_tama(vbool2_t mask, vint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vslidedown_vx_i8m8_tama(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint8m8_t test_vslidedown_vx_i8m8_tama(vbool1_t mask, vint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vslidedown_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16mf4_t test_vslidedown_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vslidedown_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16mf2_t test_vslidedown_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vslidedown_vx_i16m1_tama(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16m1_t test_vslidedown_vx_i16m1_tama(vbool16_t mask, vint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vslidedown_vx_i16m2_tama(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16m2_t test_vslidedown_vx_i16m2_tama(vbool8_t mask, vint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vslidedown_vx_i16m4_tama(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16m4_t test_vslidedown_vx_i16m4_tama(vbool4_t mask, vint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vslidedown_vx_i16m8_tama(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint16m8_t test_vslidedown_vx_i16m8_tama(vbool2_t mask, vint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint32mf2_t test_vslidedown_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vslidedown_vx_i32m1_tama(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint32m1_t test_vslidedown_vx_i32m1_tama(vbool32_t mask, vint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vslidedown_vx_i32m2_tama(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint32m2_t test_vslidedown_vx_i32m2_tama(vbool16_t mask, vint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vslidedown_vx_i32m4_tama(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint32m4_t test_vslidedown_vx_i32m4_tama(vbool8_t mask, vint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vslidedown_vx_i32m8_tama(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint32m8_t test_vslidedown_vx_i32m8_tama(vbool4_t mask, vint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vslidedown_vx_i64m1_tama(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint64m1_t test_vslidedown_vx_i64m1_tama(vbool64_t mask, vint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vslidedown_vx_i64m2_tama(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint64m2_t test_vslidedown_vx_i64m2_tama(vbool32_t mask, vint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vslidedown_vx_i64m4_tama(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint64m4_t test_vslidedown_vx_i64m4_tama(vbool16_t mask, vint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vslidedown_vx_i64m8_tama(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vint64m8_t test_vslidedown_vx_i64m8_tama(vbool8_t mask, vint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vslidedown_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8mf8_t test_vslidedown_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vslidedown_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8mf4_t test_vslidedown_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vslidedown_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8mf2_t test_vslidedown_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vslidedown_vx_u8m1_tama(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8m1_t test_vslidedown_vx_u8m1_tama(vbool8_t mask, vuint8m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vslidedown_vx_u8m2_tama(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8m2_t test_vslidedown_vx_u8m2_tama(vbool4_t mask, vuint8m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vslidedown_vx_u8m4_tama(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8m4_t test_vslidedown_vx_u8m4_tama(vbool2_t mask, vuint8m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vslidedown_vx_u8m8_tama(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint8m8_t test_vslidedown_vx_u8m8_tama(vbool1_t mask, vuint8m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vslidedown_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16mf4_t test_vslidedown_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vslidedown_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16mf2_t test_vslidedown_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vslidedown_vx_u16m1_tama(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16m1_t test_vslidedown_vx_u16m1_tama(vbool16_t mask, vuint16m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vslidedown_vx_u16m2_tama(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16m2_t test_vslidedown_vx_u16m2_tama(vbool8_t mask, vuint16m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vslidedown_vx_u16m4_tama(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16m4_t test_vslidedown_vx_u16m4_tama(vbool4_t mask, vuint16m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vslidedown_vx_u16m8_tama(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint16m8_t test_vslidedown_vx_u16m8_tama(vbool2_t mask, vuint16m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint32mf2_t test_vslidedown_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vslidedown_vx_u32m1_tama(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint32m1_t test_vslidedown_vx_u32m1_tama(vbool32_t mask, vuint32m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vslidedown_vx_u32m2_tama(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint32m2_t test_vslidedown_vx_u32m2_tama(vbool16_t mask, vuint32m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vslidedown_vx_u32m4_tama(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint32m4_t test_vslidedown_vx_u32m4_tama(vbool8_t mask, vuint32m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vslidedown_vx_u32m8_tama(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint32m8_t test_vslidedown_vx_u32m8_tama(vbool4_t mask, vuint32m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vslidedown_vx_u64m1_tama(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint64m1_t test_vslidedown_vx_u64m1_tama(vbool64_t mask, vuint64m1_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vslidedown_vx_u64m2_tama(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint64m2_t test_vslidedown_vx_u64m2_tama(vbool32_t mask, vuint64m2_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vslidedown_vx_u64m4_tama(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint64m4_t test_vslidedown_vx_u64m4_tama(vbool16_t mask, vuint64m4_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_tama(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vslidedown_vx_u64m8_tama(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t offset, size_t vl) {
-  return vslidedown_tama(mask, dest, src, offset, vl);
+vuint64m8_t test_vslidedown_vx_u64m8_tama(vbool8_t mask, vuint64m8_t src, size_t offset, size_t vl) {
+  return vslidedown_tama(mask, src, offset, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vslidedown_vx_f16mf4_tamu(
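
For reference, a minimal usage sketch of the signature change exercised by the tests above: with this patch-set the masked tail-agnostic/mask-agnostic (_tama) form of vslidedown no longer takes a maskedoff/dest operand, so a call mirrors test_vslidedown_vx_i32m1_tama. This is an illustration only, not part of the commit; it assumes <riscv_vector.h> from a toolchain carrying this change and compilation with the V extension enabled (e.g. -march=rv64gcv).

    #include <riscv_vector.h>
    #include <stddef.h>

    // Slide src down by offset under mask. No dest operand is passed, so the
    // result is tail agnostic and mask agnostic (lowered with a poison
    // passthru and policy operand 3, as in the CHECK lines above).
    vint32m1_t shift_down(vbool32_t mask, vint32m1_t src, size_t offset,
                          size_t vl) {
      return vslidedown_tama(mask, src, offset, vl);
    }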


        

