[clang] 3801296 - [5/7][Clang][RISCV] Remove default tail-undisturbed for vcompress intrinsics

via cfe-commits cfe-commits at lists.llvm.org
Mon Jan 23 23:50:48 PST 2023


Author: eopXD
Date: 2023-01-23T23:50:43-08:00
New Revision: 380129657fe3d594e966a8d7ff510ed6afb3e177

URL: https://github.com/llvm/llvm-project/commit/380129657fe3d594e966a8d7ff510ed6afb3e177
DIFF: https://github.com/llvm/llvm-project/commit/380129657fe3d594e966a8d7ff510ed6afb3e177.diff

LOG: [5/7][Clang][RISCV] Remove default tail-undisturbed for vcompress intrinsics

The destination parameter is removed from the non-policy unmasked intrinsics.

The default policy for non-policy (implicit) vcompress intrinsics will
be tail agnostic and mask undisturbed.
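
For illustration, a minimal sketch of the resulting C usage under this change
(the wrapper name keep_active_i32 is hypothetical, not part of the patch; the
intrinsic call mirrors the updated tests below):

    #include <riscv_vector.h>

    // With this change the non-policy vcompress intrinsic no longer takes a
    // destination operand; the passthru operand becomes poison (tail agnostic).
    //   New prototype: vcompress_vm_i32m1(src, mask, vl)
    //   Old prototype: vcompress_vm_i32m1(dest, src, mask, vl)
    vint32m1_t keep_active_i32(vint32m1_t src, vbool32_t mask, size_t vl) {
      return vcompress_vm_i32m1(src, mask, vl);
    }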

This is the 5th commit of a patch-set that aims to remove the
IsPrototypeDefaultTU special case for the rvv-intrinsics.

Please refer to the cover letter in the 1st commit (D140895) for an
overview.

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140942

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c

Removed: 
    


################################################################################
diff  --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 4e36f213677a3..630f32411b69a 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2244,7 +2244,7 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
 }
 
 // 17.5. Vector Compress Instruction
-let IsPrototypeDefaultTU = true,
+let IsPrototypeDefaultTU = false,
     HasMasked = false,
     UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
@@ -2256,10 +2256,10 @@ let IsPrototypeDefaultTU = true,
     }] in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
-                                    [["vm", "v", "vvvm"]]>;
+                                    [["vm", "v", "vvm"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
-                                    [["vm", "Uv", "UvUvUvm"]]>;
+                                    [["vm", "Uv", "UvUvm"]]>;
 }
 
 // Miscellaneous

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
index f44a36897889a..8c53b7e94687f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
@@ -9,532 +9,532 @@
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_f16mf4(dest, src, mask, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f16mf4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_f16mf2(dest, src, mask, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f16mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_f16m1(dest, src, mask, vl);
+vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f16m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_f16m2(dest, src, mask, vl);
+vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f16m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_f16m4(dest, src, mask, vl);
+vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f16m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress_vm_f16m8(dest, src, mask, vl);
+vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_f16m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_f32mf2(dest, src, mask, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f32mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_f32m1(dest, src, mask, vl);
+vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f32m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_f32m2(dest, src, mask, vl);
+vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f32m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_f32m4(dest, src, mask, vl);
+vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f32m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_f32m8(dest, src, mask, vl);
+vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f32m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_f64m1(dest, src, mask, vl);
+vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f64m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_f64m2(dest, src, mask, vl);
+vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f64m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_f64m4(dest, src, mask, vl);
+vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f64m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_f64m8(dest, src, mask, vl);
+vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f64m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_i8mf8(dest, src, mask, vl);
+vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i8mf8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_i8mf4(dest, src, mask, vl);
+vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i8mf4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_i8mf2(dest, src, mask, vl);
+vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i8mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_i8m1(dest, src, mask, vl);
+vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i8m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_i8m2(dest, src, mask, vl);
+vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i8m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) {
-  return vcompress_vm_i8m4(dest, src, mask, vl);
+vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i8m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) {
-  return vcompress_vm_i8m8(dest, src, mask, vl);
+vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_i8m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_i16mf4(dest, src, mask, vl);
+vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i16mf4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_i16mf2(dest, src, mask, vl);
+vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i16mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_i16m1(dest, src, mask, vl);
+vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i16m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_i16m2(dest, src, mask, vl);
+vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i16m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_i16m4(dest, src, mask, vl);
+vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i16m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress_vm_i16m8(dest, src, mask, vl);
+vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i16m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_i32mf2(dest, src, mask, vl);
+vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i32mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_i32m1(dest, src, mask, vl);
+vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i32m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_i32m2(dest, src, mask, vl);
+vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i32m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_i32m4(dest, src, mask, vl);
+vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i32m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_i32m8(dest, src, mask, vl);
+vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i32m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_i64m1(dest, src, mask, vl);
+vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i64m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_i64m2(dest, src, mask, vl);
+vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i64m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_i64m4(dest, src, mask, vl);
+vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i64m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_i64m8(dest, src, mask, vl);
+vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i64m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_u8mf8(dest, src, mask, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u8mf8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_u8mf4(dest, src, mask, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u8mf4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_u8mf2(dest, src, mask, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u8mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_u8m1(dest, src, mask, vl);
+vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u8m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_u8m2(dest, src, mask, vl);
+vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u8m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) {
-  return vcompress_vm_u8m4(dest, src, mask, vl);
+vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u8m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) {
-  return vcompress_vm_u8m8(dest, src, mask, vl);
+vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_u8m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_u16mf4(dest, src, mask, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u16mf4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_u16mf2(dest, src, mask, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u16mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_u16m1(dest, src, mask, vl);
+vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u16m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_u16m2(dest, src, mask, vl);
+vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u16m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_u16m4(dest, src, mask, vl);
+vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u16m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress_vm_u16m8(dest, src, mask, vl);
+vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u16m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_u32mf2(dest, src, mask, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u32mf2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_u32m1(dest, src, mask, vl);
+vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u32m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_u32m2(dest, src, mask, vl);
+vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u32m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_u32m4(dest, src, mask, vl);
+vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u32m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress_vm_u32m8(dest, src, mask, vl);
+vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u32m8(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress_vm_u64m1(dest, src, mask, vl);
+vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u64m1(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress_vm_u64m2(dest, src, mask, vl);
+vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u64m2(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress_vm_u64m4(dest, src, mask, vl);
+vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u64m4(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress_vm_u64m8(dest, src, mask, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u64m8(src, mask, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
index d17638675659f..a80b8644a0720 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
@@ -9,532 +9,532 @@
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8m1_t test_vcompress_vm_i8m1(vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8m2_t test_vcompress_vm_i8m2(vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8m4_t test_vcompress_vm_i8m4(vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint8m8_t test_vcompress_vm_i8m8(vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16m1_t test_vcompress_vm_i16m1(vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16m2_t test_vcompress_vm_i16m2(vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16m4_t test_vcompress_vm_i16m4(vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint16m8_t test_vcompress_vm_i16m8(vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint32m1_t test_vcompress_vm_i32m1(vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint32m2_t test_vcompress_vm_i32m2(vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint32m4_t test_vcompress_vm_i32m4(vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint32m8_t test_vcompress_vm_i32m8(vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint64m1_t test_vcompress_vm_i64m1(vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint64m2_t test_vcompress_vm_i64m2(vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint64m4_t test_vcompress_vm_i64m4(vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vint64m8_t test_vcompress_vm_i64m8(vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
 // CHECK-RV64-NEXT:  entry:
-// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) {
-  return vcompress(dest, src, mask, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(src, mask, vl);
 }
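
For reference, a minimal standalone sketch of the call shape these updated tests exercise. This is illustrative only and not part of the commit: the function name is made up, and it assumes <riscv_vector.h> plus a V-extension-enabled compile (for example clang --target=riscv64 -march=rv64gcv). It mirrors the overloaded, non-policy form shown above, where vcompress now takes only (src, mask, vl):

  #include <stddef.h>
  #include <riscv_vector.h>

  // Hypothetical helper: pack the lanes selected by mask towards
  // element 0, using the same call shape as the tests above.
  vfloat32m1_t compact_active_lanes(vfloat32m1_t src, vbool32_t mask,
                                    size_t vl) {
    // No dest argument any more; the passthru operand in the generated
    // IR is poison, as the CHECK lines above show.
    return vcompress(src, mask, vl);
  }

Code that still needs a destination operand would presumably move to the explicit policy variants (for example a _tu suffix), which are not touched in this diff.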
 