[clang] 14261c1 - [6/7][Clang][RISCV] Remove default tail-undisturbed for vmv_s_x and vfmv_s_f intrinsics

via cfe-commits <cfe-commits at lists.llvm.org>
Mon Jan 23 23:53:32 PST 2023


Author: eopXD
Date: 2023-01-23T23:53:27-08:00
New Revision: 14261c1991fe49e3a4ffcc58cdc5c5b91dc81ba9

URL: https://github.com/llvm/llvm-project/commit/14261c1991fe49e3a4ffcc58cdc5c5b91dc81ba9
DIFF: https://github.com/llvm/llvm-project/commit/14261c1991fe49e3a4ffcc58cdc5c5b91dc81ba9.diff

LOG: [6/7][Clang][RISCV] Remove default tail-undisturbed for vmv_s_x and vfmv_s_f intrinsics

The destination parameter is removed for non-policy unmasked intrinsics.

The default policy for the non-policy (implicit) vmv_s_x and vfmv_s_f
intrinsics is now tail agnostic and mask undisturbed.

With this change of prototype, vmv_s_x and vfmv_s_f can no longer
provide an overloaded version for their non-policy, tail-agnostic
intrinsics.
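
For illustration, a minimal sketch of the API change at the call site
(the _tu policy variant shown for comparison is assumed to follow the
naming used by the explicit-policy intrinsics in this patch-set):

    #include <riscv_vector.h>

    // Before this commit, the non-policy intrinsic took a dest operand
    // and was implicitly tail undisturbed:
    //   vint32m1_t v = vmv_s_x_i32m1(dest, src, vl);

    // After this commit, there is no dest operand; the passthru is
    // lowered to poison, so the tail is agnostic.
    vint32m1_t move_scalar_ta(int32_t src, size_t vl) {
      return vmv_s_x_i32m1(src, vl);
    }

    // Tail-undisturbed behavior remains available through the explicit
    // policy variant.
    vint32m1_t move_scalar_tu(vint32m1_t dest, int32_t src, size_t vl) {
      return vmv_s_x_i32m1_tu(dest, src, vl);
    }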

This is the 6th commit of a patch-set that aims to remove the
IsPrototypeDefaultTU special case for the rvv-intrinsics.

Please refer to the cover letter in the 1st commit (D140895) for an
overview.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140947

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 630f32411b69a..6c6515b2f245c 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2190,12 +2190,12 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
     defm vmv_x : RVVOp0BuiltinSet<"vmv_x_s", "csil",
                                    [["s", "ve", "ev"],
                                     ["s", "UvUe", "UeUv"]]>;
-  let OverloadedName = "vmv_s", IsPrototypeDefaultTU = true,
+  let OverloadedName = "vmv_s", IsPrototypeDefaultTU = false,
       UnMaskedPolicyScheme = HasPassthruOperand,
       SupportOverloading = false in
     defm vmv_s : RVVOutBuiltinSet<"vmv_s_x", "csil",
-                                   [["x", "v", "vve"],
-                                    ["x", "Uv", "UvUvUe"]]>;
+                                   [["x", "v", "ve"],
+                                    ["x", "Uv", "UvUe"]]>;
 }
 
 // 17.2. Floating-Point Scalar Move Instructions
@@ -2203,12 +2203,12 @@ let HasMasked = false, MaskedPolicyScheme = NonePolicy in {
   let HasVL = false, OverloadedName = "vfmv_f" in
     defm vfmv_f : RVVOp0BuiltinSet<"vfmv_f_s", "xfd",
                                      [["s", "ve", "ev"]]>;
-  let OverloadedName = "vfmv_s", IsPrototypeDefaultTU = true,
+  let OverloadedName = "vfmv_s", IsPrototypeDefaultTU = false,
       UnMaskedPolicyScheme = HasPassthruOperand,
       SupportOverloading = false in
     defm vfmv_s : RVVOutBuiltinSet<"vfmv_s_f", "xfd",
-                                     [["f", "v", "vve"],
-                                      ["x", "Uv", "UvUvUe"]]>;
+                                     [["f", "v", "ve"],
+                                      ["x", "Uv", "UvUe"]]>;
 }
 
 // 17.3. Vector Slide Instructions

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c
index b60de7ad5451f..b405a2526dade 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmv.c
@@ -153,11 +153,11 @@ _Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmv_s_f_f16mf4(vfloat16mf4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16mf4(dest, src, vl);
+vfloat16mf4_t test_vfmv_s_f_f16mf4(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
@@ -171,11 +171,11 @@ _Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmv_s_f_f16mf2(vfloat16mf2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16mf2(dest, src, vl);
+vfloat16mf2_t test_vfmv_s_f_f16mf2(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
@@ -189,11 +189,11 @@ _Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmv_s_f_f16m1(vfloat16m1_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m1(dest, src, vl);
+vfloat16m1_t test_vfmv_s_f_f16m1(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
@@ -207,11 +207,11 @@ _Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmv_s_f_f16m2(vfloat16m2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m2(dest, src, vl);
+vfloat16m2_t test_vfmv_s_f_f16m2(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
@@ -225,11 +225,11 @@ _Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmv_s_f_f16m4(vfloat16m4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m4(dest, src, vl);
+vfloat16m4_t test_vfmv_s_f_f16m4(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
@@ -243,11 +243,11 @@ _Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> poison, half [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmv_s_f_f16m8(vfloat16m8_t dest, _Float16 src, size_t vl) {
-  return vfmv_s_f_f16m8(dest, src, vl);
+vfloat16m8_t test_vfmv_s_f_f16m8(_Float16 src, size_t vl) {
+  return vfmv_s_f_f16m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
@@ -261,11 +261,11 @@ float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dest, float src, size_t vl) {
-  return vfmv_s_f_f32mf2(dest, src, vl);
+vfloat32mf2_t test_vfmv_s_f_f32mf2(float src, size_t vl) {
+  return vfmv_s_f_f32mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
@@ -279,11 +279,11 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dest, float src, size_t vl) {
-  return vfmv_s_f_f32m1(dest, src, vl);
+vfloat32m1_t test_vfmv_s_f_f32m1(float src, size_t vl) {
+  return vfmv_s_f_f32m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
@@ -297,11 +297,11 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dest, float src, size_t vl) {
-  return vfmv_s_f_f32m2(dest, src, vl);
+vfloat32m2_t test_vfmv_s_f_f32m2(float src, size_t vl) {
+  return vfmv_s_f_f32m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
@@ -315,11 +315,11 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dest, float src, size_t vl) {
-  return vfmv_s_f_f32m4(dest, src, vl);
+vfloat32m4_t test_vfmv_s_f_f32m4(float src, size_t vl) {
+  return vfmv_s_f_f32m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
@@ -333,11 +333,11 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> poison, float [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dest, float src, size_t vl) {
-  return vfmv_s_f_f32m8(dest, src, vl);
+vfloat32m8_t test_vfmv_s_f_f32m8(float src, size_t vl) {
+  return vfmv_s_f_f32m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
@@ -351,11 +351,11 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dest, double src, size_t vl) {
-  return vfmv_s_f_f64m1(dest, src, vl);
+vfloat64m1_t test_vfmv_s_f_f64m1(double src, size_t vl) {
+  return vfmv_s_f_f64m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
@@ -369,11 +369,11 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dest, double src, size_t vl) {
-  return vfmv_s_f_f64m2(dest, src, vl);
+vfloat64m2_t test_vfmv_s_f_f64m2(double src, size_t vl) {
+  return vfmv_s_f_f64m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
@@ -387,11 +387,11 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dest, double src, size_t vl) {
-  return vfmv_s_f_f64m4(dest, src, vl);
+vfloat64m4_t test_vfmv_s_f_f64m4(double src, size_t vl) {
+  return vfmv_s_f_f64m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
@@ -405,10 +405,10 @@ double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> poison, double [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dest, double src, size_t vl) {
-  return vfmv_s_f_f64m8(dest, src, vl);
+vfloat64m8_t test_vfmv_s_f_f64m8(double src, size_t vl) {
+  return vfmv_s_f_f64m8(src, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
index 345f93c894489..2e4dc657420a5 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmv.c
@@ -945,11 +945,11 @@ int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8mf8(dest, src, vl);
+vint8mf8_t test_vmv_s_x_i8mf8(int8_t src, size_t vl) {
+  return vmv_s_x_i8mf8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
@@ -963,11 +963,11 @@ int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8mf4(dest, src, vl);
+vint8mf4_t test_vmv_s_x_i8mf4(int8_t src, size_t vl) {
+  return vmv_s_x_i8mf4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
@@ -981,11 +981,11 @@ int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8mf2(dest, src, vl);
+vint8mf2_t test_vmv_s_x_i8mf2(int8_t src, size_t vl) {
+  return vmv_s_x_i8mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
@@ -999,11 +999,11 @@ int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8m1(dest, src, vl);
+vint8m1_t test_vmv_s_x_i8m1(int8_t src, size_t vl) {
+  return vmv_s_x_i8m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
@@ -1017,11 +1017,11 @@ int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8m2(dest, src, vl);
+vint8m2_t test_vmv_s_x_i8m2(int8_t src, size_t vl) {
+  return vmv_s_x_i8m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
@@ -1035,11 +1035,11 @@ int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8m4(dest, src, vl);
+vint8m4_t test_vmv_s_x_i8m4(int8_t src, size_t vl) {
+  return vmv_s_x_i8m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
@@ -1053,11 +1053,11 @@ int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dest, int8_t src, size_t vl) {
-  return vmv_s_x_i8m8(dest, src, vl);
+vint8m8_t test_vmv_s_x_i8m8(int8_t src, size_t vl) {
+  return vmv_s_x_i8m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
@@ -1071,11 +1071,11 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16mf4(dest, src, vl);
+vint16mf4_t test_vmv_s_x_i16mf4(int16_t src, size_t vl) {
+  return vmv_s_x_i16mf4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
@@ -1089,11 +1089,11 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16mf2(dest, src, vl);
+vint16mf2_t test_vmv_s_x_i16mf2(int16_t src, size_t vl) {
+  return vmv_s_x_i16mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
@@ -1107,11 +1107,11 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16m1(dest, src, vl);
+vint16m1_t test_vmv_s_x_i16m1(int16_t src, size_t vl) {
+  return vmv_s_x_i16m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
@@ -1125,11 +1125,11 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16m2(dest, src, vl);
+vint16m2_t test_vmv_s_x_i16m2(int16_t src, size_t vl) {
+  return vmv_s_x_i16m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
@@ -1143,11 +1143,11 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16m4(dest, src, vl);
+vint16m4_t test_vmv_s_x_i16m4(int16_t src, size_t vl) {
+  return vmv_s_x_i16m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
@@ -1161,11 +1161,11 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dest, int16_t src, size_t vl) {
-  return vmv_s_x_i16m8(dest, src, vl);
+vint16m8_t test_vmv_s_x_i16m8(int16_t src, size_t vl) {
+  return vmv_s_x_i16m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
@@ -1179,11 +1179,11 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dest, int32_t src, size_t vl) {
-  return vmv_s_x_i32mf2(dest, src, vl);
+vint32mf2_t test_vmv_s_x_i32mf2(int32_t src, size_t vl) {
+  return vmv_s_x_i32mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
@@ -1197,11 +1197,11 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dest, int32_t src, size_t vl) {
-  return vmv_s_x_i32m1(dest, src, vl);
+vint32m1_t test_vmv_s_x_i32m1(int32_t src, size_t vl) {
+  return vmv_s_x_i32m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
@@ -1215,11 +1215,11 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dest, int32_t src, size_t vl) {
-  return vmv_s_x_i32m2(dest, src, vl);
+vint32m2_t test_vmv_s_x_i32m2(int32_t src, size_t vl) {
+  return vmv_s_x_i32m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
@@ -1233,11 +1233,11 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dest, int32_t src, size_t vl) {
-  return vmv_s_x_i32m4(dest, src, vl);
+vint32m4_t test_vmv_s_x_i32m4(int32_t src, size_t vl) {
+  return vmv_s_x_i32m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
@@ -1251,11 +1251,11 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dest, int32_t src, size_t vl) {
-  return vmv_s_x_i32m8(dest, src, vl);
+vint32m8_t test_vmv_s_x_i32m8(int32_t src, size_t vl) {
+  return vmv_s_x_i32m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
@@ -1269,11 +1269,11 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dest, int64_t src, size_t vl) {
-  return vmv_s_x_i64m1(dest, src, vl);
+vint64m1_t test_vmv_s_x_i64m1(int64_t src, size_t vl) {
+  return vmv_s_x_i64m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
@@ -1287,11 +1287,11 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dest, int64_t src, size_t vl) {
-  return vmv_s_x_i64m2(dest, src, vl);
+vint64m2_t test_vmv_s_x_i64m2(int64_t src, size_t vl) {
+  return vmv_s_x_i64m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
@@ -1305,11 +1305,11 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dest, int64_t src, size_t vl) {
-  return vmv_s_x_i64m4(dest, src, vl);
+vint64m4_t test_vmv_s_x_i64m4(int64_t src, size_t vl) {
+  return vmv_s_x_i64m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
@@ -1323,11 +1323,11 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dest, int64_t src, size_t vl) {
-  return vmv_s_x_i64m8(dest, src, vl);
+vint64m8_t test_vmv_s_x_i64m8(int64_t src, size_t vl) {
+  return vmv_s_x_i64m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
@@ -1341,11 +1341,11 @@ uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8mf8(dest, src, vl);
+vuint8mf8_t test_vmv_s_x_u8mf8(uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
@@ -1359,11 +1359,11 @@ uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8mf4(dest, src, vl);
+vuint8mf4_t test_vmv_s_x_u8mf4(uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
@@ -1377,11 +1377,11 @@ uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8mf2(dest, src, vl);
+vuint8mf2_t test_vmv_s_x_u8mf2(uint8_t src, size_t vl) {
+  return vmv_s_x_u8mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
@@ -1395,11 +1395,11 @@ uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8m1(dest, src, vl);
+vuint8m1_t test_vmv_s_x_u8m1(uint8_t src, size_t vl) {
+  return vmv_s_x_u8m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
@@ -1413,11 +1413,11 @@ uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8m2(dest, src, vl);
+vuint8m2_t test_vmv_s_x_u8m2(uint8_t src, size_t vl) {
+  return vmv_s_x_u8m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
@@ -1431,11 +1431,11 @@ uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8m4(dest, src, vl);
+vuint8m4_t test_vmv_s_x_u8m4(uint8_t src, size_t vl) {
+  return vmv_s_x_u8m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
@@ -1449,11 +1449,11 @@ uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> poison, i8 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dest, uint8_t src, size_t vl) {
-  return vmv_s_x_u8m8(dest, src, vl);
+vuint8m8_t test_vmv_s_x_u8m8(uint8_t src, size_t vl) {
+  return vmv_s_x_u8m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
@@ -1467,11 +1467,11 @@ uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16mf4(dest, src, vl);
+vuint16mf4_t test_vmv_s_x_u16mf4(uint16_t src, size_t vl) {
+  return vmv_s_x_u16mf4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
@@ -1485,11 +1485,11 @@ uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16mf2(dest, src, vl);
+vuint16mf2_t test_vmv_s_x_u16mf2(uint16_t src, size_t vl) {
+  return vmv_s_x_u16mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
@@ -1503,11 +1503,11 @@ uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16m1(dest, src, vl);
+vuint16m1_t test_vmv_s_x_u16m1(uint16_t src, size_t vl) {
+  return vmv_s_x_u16m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
@@ -1521,11 +1521,11 @@ uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16m2(dest, src, vl);
+vuint16m2_t test_vmv_s_x_u16m2(uint16_t src, size_t vl) {
+  return vmv_s_x_u16m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
@@ -1539,11 +1539,11 @@ uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16m4(dest, src, vl);
+vuint16m4_t test_vmv_s_x_u16m4(uint16_t src, size_t vl) {
+  return vmv_s_x_u16m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
@@ -1557,11 +1557,11 @@ uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> poison, i16 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dest, uint16_t src, size_t vl) {
-  return vmv_s_x_u16m8(dest, src, vl);
+vuint16m8_t test_vmv_s_x_u16m8(uint16_t src, size_t vl) {
+  return vmv_s_x_u16m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
@@ -1575,11 +1575,11 @@ uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dest, uint32_t src, size_t vl) {
-  return vmv_s_x_u32mf2(dest, src, vl);
+vuint32mf2_t test_vmv_s_x_u32mf2(uint32_t src, size_t vl) {
+  return vmv_s_x_u32mf2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
@@ -1593,11 +1593,11 @@ uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dest, uint32_t src, size_t vl) {
-  return vmv_s_x_u32m1(dest, src, vl);
+vuint32m1_t test_vmv_s_x_u32m1(uint32_t src, size_t vl) {
+  return vmv_s_x_u32m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
@@ -1611,11 +1611,11 @@ uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dest, uint32_t src, size_t vl) {
-  return vmv_s_x_u32m2(dest, src, vl);
+vuint32m2_t test_vmv_s_x_u32m2(uint32_t src, size_t vl) {
+  return vmv_s_x_u32m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
@@ -1629,11 +1629,11 @@ uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dest, uint32_t src, size_t vl) {
-  return vmv_s_x_u32m4(dest, src, vl);
+vuint32m4_t test_vmv_s_x_u32m4(uint32_t src, size_t vl) {
+  return vmv_s_x_u32m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
@@ -1647,11 +1647,11 @@ uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> poison, i32 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dest, uint32_t src, size_t vl) {
-  return vmv_s_x_u32m8(dest, src, vl);
+vuint32m8_t test_vmv_s_x_u32m8(uint32_t src, size_t vl) {
+  return vmv_s_x_u32m8(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
@@ -1665,11 +1665,11 @@ uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dest, uint64_t src, size_t vl) {
-  return vmv_s_x_u64m1(dest, src, vl);
+vuint64m1_t test_vmv_s_x_u64m1(uint64_t src, size_t vl) {
+  return vmv_s_x_u64m1(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
@@ -1683,11 +1683,11 @@ uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dest, uint64_t src, size_t vl) {
-  return vmv_s_x_u64m2(dest, src, vl);
+vuint64m2_t test_vmv_s_x_u64m2(uint64_t src, size_t vl) {
+  return vmv_s_x_u64m2(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
@@ -1701,11 +1701,11 @@ uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dest, uint64_t src, size_t vl) {
-  return vmv_s_x_u64m4(dest, src, vl);
+vuint64m4_t test_vmv_s_x_u64m4(uint64_t src, size_t vl) {
+  return vmv_s_x_u64m4(src, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
@@ -1719,10 +1719,10 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
 
 // CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> poison, i64 [[SRC:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dest, uint64_t src, size_t vl) {
-  return vmv_s_x_u64m8(dest, src, vl);
+vuint64m8_t test_vmv_s_x_u64m8(uint64_t src, size_t vl) {
+  return vmv_s_x_u64m8(src, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
index cb5030f4db656..a86aa2f44cc25 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmv.c
@@ -16,15 +16,6 @@ _Float16 test_vfmv_f_s_f16mf4_f16(vfloat16mf4_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmv.s.f.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
-//
-vfloat16mf4_t test_vfmv_s_f_f16mf4(vfloat16mf4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16mf2_f16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv2f16(<vscale x 2 x half> [[SRC:%.*]])
@@ -34,15 +25,6 @@ _Float16 test_vfmv_f_s_f16mf2_f16(vfloat16mf2_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmv.s.f.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
-//
-vfloat16mf2_t test_vfmv_s_f_f16mf2(vfloat16mf2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m1_f16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv4f16(<vscale x 4 x half> [[SRC:%.*]])
@@ -52,15 +34,6 @@ _Float16 test_vfmv_f_s_f16m1_f16(vfloat16m1_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmv.s.f.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
-//
-vfloat16m1_t test_vfmv_s_f_f16m1(vfloat16m1_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m2_f16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv8f16(<vscale x 8 x half> [[SRC:%.*]])
@@ -70,15 +43,6 @@ _Float16 test_vfmv_f_s_f16m2_f16(vfloat16m2_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmv.s.f.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
-//
-vfloat16m2_t test_vfmv_s_f_f16m2(vfloat16m2_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m4_f16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv16f16(<vscale x 16 x half> [[SRC:%.*]])
@@ -88,15 +52,6 @@ _Float16 test_vfmv_f_s_f16m4_f16(vfloat16m4_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmv.s.f.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
-//
-vfloat16m4_t test_vfmv_s_f_f16m4(vfloat16m4_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f16m8_f16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call half @llvm.riscv.vfmv.f.s.nxv32f16(<vscale x 32 x half> [[SRC:%.*]])
@@ -106,15 +61,6 @@ _Float16 test_vfmv_f_s_f16m8_f16(vfloat16m8_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmv.s.f.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], half [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
-//
-vfloat16m8_t test_vfmv_s_f_f16m8(vfloat16m8_t dest, _Float16 src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32mf2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv1f32(<vscale x 1 x float> [[SRC:%.*]])
@@ -124,15 +70,6 @@ float test_vfmv_f_s_f32mf2_f32(vfloat32mf2_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmv.s.f.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
-//
-vfloat32mf2_t test_vfmv_s_f_f32mf2(vfloat32mf2_t dest, float src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m1_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv2f32(<vscale x 2 x float> [[SRC:%.*]])
@@ -142,15 +79,6 @@ float test_vfmv_f_s_f32m1_f32(vfloat32m1_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmv.s.f.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
-//
-vfloat32m1_t test_vfmv_s_f_f32m1(vfloat32m1_t dest, float src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m2_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv4f32(<vscale x 4 x float> [[SRC:%.*]])
@@ -160,15 +88,6 @@ float test_vfmv_f_s_f32m2_f32(vfloat32m2_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmv.s.f.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
-//
-vfloat32m2_t test_vfmv_s_f_f32m2(vfloat32m2_t dest, float src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m4_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv8f32(<vscale x 8 x float> [[SRC:%.*]])
@@ -178,15 +97,6 @@ float test_vfmv_f_s_f32m4_f32(vfloat32m4_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmv.s.f.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
-//
-vfloat32m4_t test_vfmv_s_f_f32m4(vfloat32m4_t dest, float src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f32m8_f32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call float @llvm.riscv.vfmv.f.s.nxv16f32(<vscale x 16 x float> [[SRC:%.*]])
@@ -196,15 +106,6 @@ float test_vfmv_f_s_f32m8_f32(vfloat32m8_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmv.s.f.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], float [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
-//
-vfloat32m8_t test_vfmv_s_f_f32m8(vfloat32m8_t dest, float src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m1_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv1f64(<vscale x 1 x double> [[SRC:%.*]])
@@ -214,15 +115,6 @@ double test_vfmv_f_s_f64m1_f64(vfloat64m1_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmv.s.f.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
-//
-vfloat64m1_t test_vfmv_s_f_f64m1(vfloat64m1_t dest, double src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m2_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv2f64(<vscale x 2 x double> [[SRC:%.*]])
@@ -232,15 +124,6 @@ double test_vfmv_f_s_f64m2_f64(vfloat64m2_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmv.s.f.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
-//
-vfloat64m2_t test_vfmv_s_f_f64m2(vfloat64m2_t dest, double src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m4_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv4f64(<vscale x 4 x double> [[SRC:%.*]])
@@ -250,15 +133,6 @@ double test_vfmv_f_s_f64m4_f64(vfloat64m4_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmv.s.f.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
-//
-vfloat64m4_t test_vfmv_s_f_f64m4(vfloat64m4_t dest, double src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vfmv_f_s_f64m8_f64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call double @llvm.riscv.vfmv.f.s.nxv8f64(<vscale x 8 x double> [[SRC:%.*]])
@@ -268,12 +142,3 @@ double test_vfmv_f_s_f64m8_f64(vfloat64m8_t src) {
   return vfmv_f(src);
 }
 
-// CHECK-RV64-LABEL: @test_vfmv_s_f_f64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmv.s.f.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], double [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
-//
-vfloat64m8_t test_vfmv_s_f_f64m8(vfloat64m8_t dest, double src, size_t vl) {
-  return vfmv_s(dest, src, vl);
-}
-
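
(For callers migrating off the removed non-policy vfmv_s overloads above, a
minimal before/after sketch follows. It is not part of this commit's diff;
the function spellings and the _tu tail-undisturbed policy variant are
assumptions based on the companion policy intrinsics, not names confirmed by
the hunks here. Compile with a V-extension target, e.g. -march=rv64gcv.)

#include <stddef.h>
#include <riscv_vector.h>

// Old non-policy form (removed above): vfmv_s(dest, src, vl) wrote src into
// element 0 of dest with tail-undisturbed semantics by default.
//
// New non-policy form: no dest operand, tail agnostic.
vfloat32m1_t scalar_move_ta(float src, size_t vl) {
  return vfmv_s_f_f32m1(src, vl);
}

// Callers that still need the old tail-undisturbed behavior keep the dest
// operand via the explicit policy variant (assumed spelling: _tu suffix).
vfloat32m1_t scalar_move_tu(vfloat32m1_t dest, float src, size_t vl) {
  return vfmv_s_f_f32m1_tu(dest, src, vl);
}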

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
index dd5765bbe2339..0bddd22f207d0 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmv.c
@@ -547,15 +547,6 @@ int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -565,15 +556,6 @@ int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -583,15 +565,6 @@ int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -601,15 +574,6 @@ int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -619,15 +583,6 @@ int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -637,15 +592,6 @@ int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -655,15 +601,6 @@ int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dest, int8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -673,15 +610,6 @@ int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -691,15 +619,6 @@ int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -709,15 +628,6 @@ int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -727,15 +637,6 @@ int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -745,15 +646,6 @@ int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -763,15 +655,6 @@ int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dest, int16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -781,15 +664,6 @@ int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dest, int32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -799,15 +673,6 @@ int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dest, int32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -817,15 +682,6 @@ int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dest, int32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -835,15 +691,6 @@ int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dest, int32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -853,15 +700,6 @@ int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dest, int32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -871,15 +709,6 @@ int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dest, int64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -889,15 +718,6 @@ int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dest, int64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -907,15 +727,6 @@ int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dest, int64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -925,15 +736,6 @@ int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dest, int64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
@@ -943,15 +745,6 @@ uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
-//
-vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
@@ -961,15 +754,6 @@ uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
-//
-vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
@@ -979,15 +763,6 @@ uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
-//
-vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
@@ -997,15 +772,6 @@ uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
-//
-vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
@@ -1015,15 +781,6 @@ uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
-//
-vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
@@ -1033,15 +790,6 @@ uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
-//
-vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
@@ -1051,15 +799,6 @@ uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
-//
-vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dest, uint8_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
@@ -1069,15 +808,6 @@ uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
-//
-vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
@@ -1087,15 +817,6 @@ uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
-//
-vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
@@ -1105,15 +826,6 @@ uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
-//
-vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
@@ -1123,15 +835,6 @@ uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
-//
-vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
@@ -1141,15 +844,6 @@ uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
-//
-vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
@@ -1159,15 +853,6 @@ uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
-//
-vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dest, uint16_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
@@ -1177,15 +862,6 @@ uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
-//
-vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dest, uint32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
@@ -1195,15 +871,6 @@ uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
-//
-vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dest, uint32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
@@ -1213,15 +880,6 @@ uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
-//
-vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dest, uint32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
@@ -1231,15 +889,6 @@ uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
-//
-vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dest, uint32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
@@ -1249,15 +898,6 @@ uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
-//
-vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dest, uint32_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
@@ -1267,15 +907,6 @@ uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
-//
-vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dest, uint64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
@@ -1285,15 +916,6 @@ uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
-//
-vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dest, uint64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
@@ -1303,15 +925,6 @@ uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
-//
-vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dest, uint64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
 // CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
 // CHECK-RV64-NEXT:  entry:
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
@@ -1321,12 +934,3 @@ uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
   return vmv_x(src);
 }
 
-// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
-// CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
-// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
-//
-vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dest, uint64_t src, size_t vl) {
-  return vmv_s(dest, src, vl);
-}
-
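
(The integer vmv_s_x side follows the same pattern as the floating-point
sketch above; again a hedged sketch, with spellings assumed rather than taken
from this diff.)

#include <stddef.h>
#include <stdint.h>
#include <riscv_vector.h>

// New non-policy form: tail agnostic, no dest operand.
vint32m1_t move_first_ta(int32_t src, size_t vl) {
  return vmv_s_x_i32m1(src, vl);
}

// Assumed _tu policy variant for the old tail-undisturbed behavior.
vint32m1_t move_first_tu(vint32m1_t dest, int32_t src, size_t vl) {
  return vmv_s_x_i32m1_tu(dest, src, vl);
}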