[clang] 2532aa5 - [Clang][RISCV] Update operand order for vmerge and vcompress

via cfe-commits cfe-commits at lists.llvm.org
Thu Jan 12 17:30:58 PST 2023


Author: eopXD
Date: 2023-01-12T17:30:50-08:00
New Revision: 2532aa57665a6ff542213cbe2b38b9ed245ca5aa

URL: https://github.com/llvm/llvm-project/commit/2532aa57665a6ff542213cbe2b38b9ed245ca5aa
DIFF: https://github.com/llvm/llvm-project/commit/2532aa57665a6ff542213cbe2b38b9ed245ca5aa.diff

LOG: [Clang][RISCV] Update operand order for vmerge and vcompress

From:
  vint32m1_t vmerge_vvm_i32m1 (vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl);
  vint32m1_t vcompress_vm_i32m1 (vbool32_t mask, vint32m1_t src, size_t vl);

To:
  vint32m1_t vmerge_vvm_i32m1 (vint32m1_t op1, vint32m1_t op2, vbool32_t selector, size_t vl);
  vint32m1_t vcompress_vm_i32m1 (vint32m1_t src, vbool32_t selector, size_t vl);
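
For example, a caller written against the old signatures would be updated roughly as follows (a minimal sketch; the variable names are illustrative, not taken from this patch):

  // Before this change the mask came first:
  //   vint32m1_t r = vmerge_vvm_i32m1(mask, op1, op2, vl);
  //   vint32m1_t c = vcompress_vm_i32m1(mask, src, vl);
  // After this change the mask (selector) moves to just before vl:
  vint32m1_t r = vmerge_vvm_i32m1(op1, op2, mask, vl);
  vint32m1_t c = vcompress_vm_i32m1(src, mask, vl);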

Addresses issues:
riscv-non-isa/rvv-intrinsic-doc#140
riscv-non-isa/rvv-intrinsic-doc#167

Pull request:
riscv-non-isa/rvv-intrinsic-doc#185

Reviewed By: craig.topper

Differential Revision: https://reviews.llvm.org/D140686

Added: 
    

Modified: 
    clang/include/clang/Basic/riscv_vector.td
    clang/include/clang/Support/RISCVVIntrinsicUtils.h
    clang/lib/Support/RISCVVIntrinsicUtils.cpp
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
    clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c

Removed: 
    


################################################################################
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 29ff8accf7f07..6a68993544ac1 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -137,8 +137,6 @@ class PolicyScheme<int val>{
 def NonePolicy : PolicyScheme<0>;
 def HasPassthruOperand : PolicyScheme<1>;
 def HasPolicyOperand : PolicyScheme<2>;
-// Specail case for passthru operand which is not a first opeand.
-def HasPassthruOperandAtIdx1 : PolicyScheme<3>;
 
 class RVVBuiltin<string suffix, string prototype, string type_range,
                  string overloaded_suffix = ""> {
@@ -1878,20 +1876,19 @@ defm vwmaccus : RVVOutOp1Op2BuiltinSet<"vwmaccus", "csi",
 // 12.15. Vector Integer Merge Instructions
 // C/C++ Operand: (mask, op1, op2, vl), Intrinsic: (passthru, op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "csil",
-                                    [["vvm", "v", "vmvv"],
-                                     ["vxm", "v", "vmve"],
-                                     ["vvm", "Uv", "UvmUvUv"],
-                                     ["vxm", "Uv", "UvmUvUe"]]>;
+                                    [["vvm", "v", "vvvm"],
+                                     ["vxm", "v", "vvem"],
+                                     ["vvm", "Uv", "UvUvUvm"],
+                                     ["vxm", "Uv", "UvUvUem"]]>;
 }
 
 // 12.16. Vector Integer Move Instructions
@@ -2022,19 +2019,18 @@ let Name = "vfclass_v", UnMaskedPolicyScheme = HasPassthruOperand in
 // 14.15. Vector Floating-Point Merge Instructio
 // C/C++ Operand: (mask, op1, op2, vl), Builtin: (op1, op2, mask, vl)
 let HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
       IntrinsicTypes = {ResultType, Ops[2]->getType(), Ops.back()->getType()};
     }] in {
   defm vmerge : RVVOutOp1BuiltinSet<"vmerge", "xfd",
-                                    [["vvm", "v", "vmvv"]]>;
+                                    [["vvm", "v", "vvvm"]]>;
   defm vfmerge : RVVOutOp1BuiltinSet<"vfmerge", "xfd",
-                                    [["vfm", "v", "vmve"]]>;
+                                     [["vfm", "v", "vvem"]]>;
 }
 
 // 14.16. Vector Floating-Point Move Instruction
@@ -2222,10 +2218,9 @@ defm vrgatherei16 : RVVOutBuiltinSet<"vrgatherei16_vv", "csil",
 // 17.5. Vector Compress Instruction
 let IsPrototypeDefaultTU = true,
     HasMasked = false,
-    UnMaskedPolicyScheme = HasPassthruOperandAtIdx1,
+    UnMaskedPolicyScheme = HasPassthruOperand,
     MaskedPolicyScheme = NonePolicy,
     ManualCodegen = [{
-      std::rotate(Ops.begin(), Ops.begin() + 1, Ops.end() - 1);
       // insert poison passthru
       if (PolicyAttrs == TAIL_AGNOSTIC)
         Ops.insert(Ops.begin(), llvm::PoisonValue::get(ResultType));
@@ -2233,10 +2228,10 @@ let IsPrototypeDefaultTU = true,
     }] in {
   // signed and floating type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csilxfd",
-                                    [["vm", "v", "vmvv"]]>;
+                                    [["vm", "v", "vvvm"]]>;
   // unsigned type
   defm vcompress : RVVOutBuiltinSet<"vcompress", "csil",
-                                    [["vm", "Uv", "UvmUvUv"]]>;
+                                    [["vm", "Uv", "UvUvUvm"]]>;
 }
 
 // Miscellaneous

diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 0d5ebf058e604..68c8c48999e46 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -366,9 +366,6 @@ enum PolicyScheme : uint8_t {
   // Passthru operand is at first parameter in C builtin.
   HasPassthruOperand,
   HasPolicyOperand,
-  // Special case for vmerge, the passthru operand is second
-  // parameter in C builtin.
-  HasPassthruOperandAtIdx1,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic

diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index 9a550806f69d8..2fbd5622991ce 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -964,15 +964,6 @@ llvm::SmallVector<PrototypeDescriptor> RVVIntrinsic::computeBuiltinTypes(
       else if (PolicyAttrs.isTAPolicy() && HasPassthruOp &&
                IsPrototypeDefaultTU)
         NewPrototype.erase(NewPrototype.begin() + 1);
-      if (DefaultScheme == PolicyScheme::HasPassthruOperandAtIdx1) {
-        if (PolicyAttrs.isTUPolicy() && !IsPrototypeDefaultTU) {
-          // Insert undisturbed output to index 1
-          NewPrototype.insert(NewPrototype.begin() + 2, NewPrototype[0]);
-        } else if (PolicyAttrs.isTAPolicy() && IsPrototypeDefaultTU) {
-          // Erase passthru for TA policy
-          NewPrototype.erase(NewPrototype.begin() + 2);
-        }
-      }
     } else if (PolicyAttrs.isTUPolicy() && HasPassthruOp) {
       // NF > 1 cases for segment load operations.
       // Convert

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
index f36d530a29851..f44a36897889a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vcompress.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) {
-  return vcompress_vm_f16mf4(mask, dest, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f16mf4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) {
-  return vcompress_vm_f16mf2(mask, dest, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f16mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) {
-  return vcompress_vm_f16m1(mask, dest, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f16m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) {
-  return vcompress_vm_f16m2(mask, dest, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f16m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) {
-  return vcompress_vm_f16m4(mask, dest, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f16m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) {
-  return vcompress_vm_f16m8(mask, dest, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_f16m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) {
-  return vcompress_vm_f32mf2(mask, dest, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f32mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) {
-  return vcompress_vm_f32m1(mask, dest, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f32m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) {
-  return vcompress_vm_f32m2(mask, dest, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f32m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) {
-  return vcompress_vm_f32m4(mask, dest, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f32m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) {
-  return vcompress_vm_f32m8(mask, dest, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f32m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) {
-  return vcompress_vm_f64m1(mask, dest, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f64m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) {
-  return vcompress_vm_f64m2(mask, dest, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f64m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) {
-  return vcompress_vm_f64m4(mask, dest, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f64m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) {
-  return vcompress_vm_f64m8(mask, dest, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f64m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
-  return vcompress_vm_i8mf8(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i8mf8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -156,8 +156,8 @@ vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
-  return vcompress_vm_i8mf4(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i8mf4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -165,8 +165,8 @@ vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
-  return vcompress_vm_i8mf2(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i8mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -174,8 +174,8 @@ vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) {
-  return vcompress_vm_i8m1(mask, dest, src, vl);
+vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i8m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
@@ -183,8 +183,8 @@ vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) {
-  return vcompress_vm_i8m2(mask, dest, src, vl);
+vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i8m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
@@ -192,8 +192,8 @@ vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) {
-  return vcompress_vm_i8m4(mask, dest, src, vl);
+vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i8m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
@@ -201,8 +201,8 @@ vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) {
-  return vcompress_vm_i8m8(mask, dest, src, vl);
+vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_i8m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
@@ -210,8 +210,8 @@ vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) {
-  return vcompress_vm_i16mf4(mask, dest, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i16mf4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
@@ -219,8 +219,8 @@ vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) {
-  return vcompress_vm_i16mf2(mask, dest, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i16mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
@@ -228,8 +228,8 @@ vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) {
-  return vcompress_vm_i16m1(mask, dest, src, vl);
+vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i16m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
@@ -237,8 +237,8 @@ vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) {
-  return vcompress_vm_i16m2(mask, dest, src, vl);
+vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i16m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
@@ -246,8 +246,8 @@ vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) {
-  return vcompress_vm_i16m4(mask, dest, src, vl);
+vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i16m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
@@ -255,8 +255,8 @@ vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) {
-  return vcompress_vm_i16m8(mask, dest, src, vl);
+vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i16m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
@@ -264,8 +264,8 @@ vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) {
-  return vcompress_vm_i32mf2(mask, dest, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i32mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
@@ -273,8 +273,8 @@ vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) {
-  return vcompress_vm_i32m1(mask, dest, src, vl);
+vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i32m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
@@ -282,8 +282,8 @@ vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) {
-  return vcompress_vm_i32m2(mask, dest, src, vl);
+vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i32m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
@@ -291,8 +291,8 @@ vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) {
-  return vcompress_vm_i32m4(mask, dest, src, vl);
+vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i32m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
@@ -300,8 +300,8 @@ vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) {
-  return vcompress_vm_i32m8(mask, dest, src, vl);
+vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i32m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
@@ -309,8 +309,8 @@ vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) {
-  return vcompress_vm_i64m1(mask, dest, src, vl);
+vint64m1_t test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i64m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
@@ -318,8 +318,8 @@ vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) {
-  return vcompress_vm_i64m2(mask, dest, src, vl);
+vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i64m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
@@ -327,8 +327,8 @@ vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) {
-  return vcompress_vm_i64m4(mask, dest, src, vl);
+vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i64m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
@@ -336,8 +336,8 @@ vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) {
-  return vcompress_vm_i64m8(mask, dest, src, vl);
+vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i64m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
@@ -345,8 +345,8 @@ vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) {
-  return vcompress_vm_u8mf8(mask, dest, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u8mf8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
@@ -354,8 +354,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) {
-  return vcompress_vm_u8mf4(mask, dest, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u8mf4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
@@ -363,8 +363,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) {
-  return vcompress_vm_u8mf2(mask, dest, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u8mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
@@ -372,8 +372,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) {
-  return vcompress_vm_u8m1(mask, dest, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u8m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
@@ -381,8 +381,8 @@ vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) {
-  return vcompress_vm_u8m2(mask, dest, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u8m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
@@ -390,8 +390,8 @@ vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) {
-  return vcompress_vm_u8m4(mask, dest, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u8m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
@@ -399,8 +399,8 @@ vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) {
-  return vcompress_vm_u8m8(mask, dest, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_u8m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
@@ -408,8 +408,8 @@ vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) {
-  return vcompress_vm_u16mf4(mask, dest, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u16mf4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
@@ -417,8 +417,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) {
-  return vcompress_vm_u16mf2(mask, dest, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u16mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
@@ -426,8 +426,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) {
-  return vcompress_vm_u16m1(mask, dest, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u16m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
@@ -435,8 +435,8 @@ vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) {
-  return vcompress_vm_u16m2(mask, dest, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u16m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
@@ -444,8 +444,8 @@ vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) {
-  return vcompress_vm_u16m4(mask, dest, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u16m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
@@ -453,8 +453,8 @@ vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) {
-  return vcompress_vm_u16m8(mask, dest, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u16m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
@@ -462,8 +462,8 @@ vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) {
-  return vcompress_vm_u32mf2(mask, dest, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u32mf2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
@@ -471,8 +471,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) {
-  return vcompress_vm_u32m1(mask, dest, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u32m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
@@ -480,8 +480,8 @@ vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) {
-  return vcompress_vm_u32m2(mask, dest, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u32m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
@@ -489,8 +489,8 @@ vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) {
-  return vcompress_vm_u32m4(mask, dest, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u32m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
@@ -498,8 +498,8 @@ vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) {
-  return vcompress_vm_u32m8(mask, dest, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u32m8(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
@@ -507,8 +507,8 @@ vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) {
-  return vcompress_vm_u64m1(mask, dest, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u64m1(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
@@ -516,8 +516,8 @@ vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) {
-  return vcompress_vm_u64m2(mask, dest, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u64m2(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
@@ -525,8 +525,8 @@ vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) {
-  return vcompress_vm_u64m4(mask, dest, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u64m4(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
@@ -534,7 +534,7 @@ vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) {
-  return vcompress_vm_u64m8(mask, dest, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u64m8(dest, src, mask, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c
index 329707ee76a6a..29de4c1405320 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vfmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf4(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf2(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m1(mask, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m2(mask, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m4(mask, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m8(mask, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge_vfm_f16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32mf2(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m1(mask, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m2(mask, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m4(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m8(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m1(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m2(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m4(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
@@ -138,7 +138,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m8(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f64m8(op1, op2, mask, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
index cc2c8176a5a49..6ca7cd6494011 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/non-overloaded/vmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_i8mf8(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i8mf8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
@@ -21,8 +21,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf8(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i8mf8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
@@ -30,8 +30,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_i8mf4(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i8mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
@@ -39,8 +39,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf4(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i8mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
@@ -48,8 +48,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_i8mf2(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i8mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
@@ -57,8 +57,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf2(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i8mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
@@ -66,8 +66,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge_vvm_i8m1(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i8m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
@@ -75,8 +75,8 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m1(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i8m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
@@ -84,8 +84,8 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge_vvm_i8m2(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i8m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
@@ -93,8 +93,8 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m2(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i8m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
@@ -102,8 +102,8 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge_vvm_i8m4(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i8m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
@@ -111,8 +111,8 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m4(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i8m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
@@ -120,8 +120,8 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge_vvm_i8m8(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_i8m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
@@ -129,8 +129,8 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m8(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_i8m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
@@ -138,8 +138,8 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_i16mf4(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
@@ -147,8 +147,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf4(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
@@ -156,8 +156,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_i16mf2(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
@@ -165,8 +165,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf2(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
@@ -174,8 +174,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge_vvm_i16m1(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
@@ -183,8 +183,8 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m1(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
@@ -192,8 +192,8 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge_vvm_i16m2(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
@@ -201,8 +201,8 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m2(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
@@ -210,8 +210,8 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge_vvm_i16m4(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
@@ -219,8 +219,8 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m4(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
@@ -228,8 +228,8 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge_vvm_i16m8(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
@@ -237,8 +237,8 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m8(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
@@ -246,8 +246,8 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_i32mf2(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
@@ -255,8 +255,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32mf2(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
@@ -264,8 +264,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge_vvm_i32m1(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
@@ -273,8 +273,8 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m1(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
@@ -282,8 +282,8 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge_vvm_i32m2(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
@@ -291,8 +291,8 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m2(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
@@ -300,8 +300,8 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge_vvm_i32m4(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
@@ -309,8 +309,8 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m4(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
@@ -318,8 +318,8 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge_vvm_i32m8(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
@@ -327,8 +327,8 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m8(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
@@ -336,8 +336,8 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge_vvm_i64m1(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
@@ -345,8 +345,8 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m1(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
@@ -354,8 +354,8 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge_vvm_i64m2(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
@@ -363,8 +363,8 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m2(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
@@ -372,8 +372,8 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge_vvm_i64m4(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
@@ -381,8 +381,8 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m4(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
@@ -390,8 +390,8 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge_vvm_i64m8(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i64m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
@@ -399,8 +399,8 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m8(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i64m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
@@ -408,8 +408,8 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_u8mf8(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u8mf8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
@@ -417,8 +417,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf8(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u8mf8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
@@ -426,8 +426,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_u8mf4(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u8mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
@@ -435,8 +435,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf4(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u8mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
@@ -444,8 +444,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_u8mf2(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u8mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
@@ -453,8 +453,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf2(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u8mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
@@ -462,8 +462,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge_vvm_u8m1(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u8m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
@@ -471,8 +471,8 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m1(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u8m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
@@ -480,8 +480,8 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge_vvm_u8m2(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u8m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
@@ -489,8 +489,8 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m2(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u8m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
@@ -498,8 +498,8 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge_vvm_u8m4(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u8m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
@@ -507,8 +507,8 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m4(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u8m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
@@ -516,8 +516,8 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge_vvm_u8m8(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_u8m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
@@ -525,8 +525,8 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m8(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_u8m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
@@ -534,8 +534,8 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_u16mf4(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
@@ -543,8 +543,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf4(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
@@ -552,8 +552,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_u16mf2(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
@@ -561,8 +561,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf2(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
@@ -570,8 +570,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge_vvm_u16m1(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
@@ -579,8 +579,8 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m1(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
@@ -588,8 +588,8 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge_vvm_u16m2(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
@@ -597,8 +597,8 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m2(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
@@ -606,8 +606,8 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge_vvm_u16m4(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
@@ -615,8 +615,8 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m4(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
@@ -624,8 +624,8 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge_vvm_u16m8(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
@@ -633,8 +633,8 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m8(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
@@ -642,8 +642,8 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_u32mf2(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
@@ -651,8 +651,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32mf2(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
@@ -660,8 +660,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge_vvm_u32m1(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
@@ -669,8 +669,8 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m1(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
@@ -678,8 +678,8 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge_vvm_u32m2(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
@@ -687,8 +687,8 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m2(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
@@ -696,8 +696,8 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge_vvm_u32m4(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
@@ -705,8 +705,8 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m4(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
@@ -714,8 +714,8 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge_vvm_u32m8(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
@@ -723,8 +723,8 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m8(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
@@ -732,8 +732,8 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge_vvm_u64m1(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
@@ -741,8 +741,8 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m1(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
@@ -750,8 +750,8 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge_vvm_u64m2(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
@@ -759,8 +759,8 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m2(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
@@ -768,8 +768,8 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge_vvm_u64m4(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
@@ -777,8 +777,8 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m4(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
@@ -786,8 +786,8 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge_vvm_u64m8(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u64m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
@@ -795,8 +795,8 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m8(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u64m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
@@ -804,8 +804,8 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge_vvm_f16mf4(mask, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f16mf4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
@@ -813,8 +813,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge_vvm_f16mf2(mask, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f16mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
@@ -822,8 +822,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge_vvm_f16m1(mask, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f16m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
@@ -831,8 +831,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge_vvm_f16m2(mask, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f16m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
@@ -840,8 +840,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge_vvm_f16m4(mask, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f16m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
@@ -849,8 +849,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge_vvm_f16m8(mask, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_f16m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
@@ -858,8 +858,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_vvm_f32mf2(mask, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f32mf2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
@@ -867,8 +867,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge_vvm_f32m1(mask, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f32m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
@@ -876,8 +876,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge_vvm_f32m2(mask, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f32m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
@@ -885,8 +885,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge_vvm_f32m4(mask, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f32m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
@@ -894,8 +894,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge_vvm_f32m8(mask, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f32m8(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
@@ -903,8 +903,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge_vvm_f64m1(mask, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f64m1(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
@@ -912,8 +912,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge_vvm_f64m2(mask, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f64m2(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
@@ -921,8 +921,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge_vvm_f64m4(mask, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f64m4(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
@@ -930,7 +930,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge_vvm_f64m8(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f64m8(op1, op2, mask, vl);
 }
 

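(For reference only, not part of the patch: a minimal standalone sketch of a caller written against the updated operand order, using the same non-policy vmerge intrinsic exercised by the tests above. The helper name is illustrative; the call itself mirrors the updated test bodies, where the mask now follows the data operands and vl stays last.)

    #include <riscv_vector.h>

    // Blend two u32 vectors: elements of op2 are taken where the mask is
    // set, elements of op1 elsewhere. Updated order: (op1, op2, mask, vl).
    vuint32m1_t blend_u32m1(vuint32m1_t op1, vuint32m1_t op2,
                            vbool32_t mask, size_t vl) {
      return vmerge_vvm_u32m1(op1, op2, mask, vl);
    }
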
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
index 539b3f993d40f..d17638675659f 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vcompress.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[DEST:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloat16mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4(vfloat16mf4_t dest, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4(vbool64_t mask, vfloat16mf4_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[DEST:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloat16mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2(vfloat16mf2_t dest, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2(vbool32_t mask, vfloat16mf2_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[DEST:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1(vfloat16m1_t dest, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vcompress_vm_f16m1(vbool16_t mask, vfloat16m1_t dest, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[DEST:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2(vfloat16m2_t dest, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vcompress_vm_f16m2(vbool8_t mask, vfloat16m2_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[DEST:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4(vfloat16m4_t dest, vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vcompress_vm_f16m4(vbool4_t mask, vfloat16m4_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[DEST:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8(vfloat16m8_t dest, vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vcompress_vm_f16m8(vbool2_t mask, vfloat16m8_t dest, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[DEST:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloat32mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2(vfloat32mf2_t dest, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2(vbool64_t mask, vfloat32mf2_t dest, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[DEST:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1(vfloat32m1_t dest, vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vcompress_vm_f32m1(vbool32_t mask, vfloat32m1_t dest, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[DEST:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2(vfloat32m2_t dest, vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vcompress_vm_f32m2(vbool16_t mask, vfloat32m2_t dest, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[DEST:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4(vfloat32m4_t dest, vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vcompress_vm_f32m4(vbool8_t mask, vfloat32m4_t dest, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[DEST:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8(vfloat32m8_t dest, vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vcompress_vm_f32m8(vbool4_t mask, vfloat32m8_t dest, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[DEST:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1(vfloat64m1_t dest, vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vcompress_vm_f64m1(vbool64_t mask, vfloat64m1_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[DEST:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2(vfloat64m2_t dest, vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vcompress_vm_f64m2(vbool32_t mask, vfloat64m2_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[DEST:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4(vfloat64m4_t dest, vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vcompress_vm_f64m4(vbool16_t mask, vfloat64m4_t dest, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[DEST:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8(vfloat64m8_t dest, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vcompress_vm_f64m8(vbool8_t mask, vfloat64m8_t dest, vfloat64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8(vint8mf8_t dest, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4(
@@ -156,8 +156,8 @@ vint8mf8_t test_vcompress_vm_i8mf8(vbool64_t mask, vint8mf8_t dest, vint8mf8_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4(vint8mf4_t dest, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2(
@@ -165,8 +165,8 @@ vint8mf4_t test_vcompress_vm_i8mf4(vbool32_t mask, vint8mf4_t dest, vint8mf4_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2(vint8mf2_t dest, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1(
@@ -174,8 +174,8 @@ vint8mf2_t test_vcompress_vm_i8mf2(vbool16_t mask, vint8mf2_t dest, vint8mf2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8m1_t test_vcompress_vm_i8m1(vint8m1_t dest, vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2(
@@ -183,8 +183,8 @@ vint8m1_t test_vcompress_vm_i8m1(vbool8_t mask, vint8m1_t dest, vint8m1_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8m2_t test_vcompress_vm_i8m2(vint8m2_t dest, vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4(
@@ -192,8 +192,8 @@ vint8m2_t test_vcompress_vm_i8m2(vbool4_t mask, vint8m2_t dest, vint8m2_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8m4_t test_vcompress_vm_i8m4(vint8m4_t dest, vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8(
@@ -201,8 +201,8 @@ vint8m4_t test_vcompress_vm_i8m4(vbool2_t mask, vint8m4_t dest, vint8m4_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint8m8_t test_vcompress_vm_i8m8(vint8m8_t dest, vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4(
@@ -210,8 +210,8 @@ vint8m8_t test_vcompress_vm_i8m8(vbool1_t mask, vint8m8_t dest, vint8m8_t src, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4(vint16mf4_t dest, vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2(
@@ -219,8 +219,8 @@ vint16mf4_t test_vcompress_vm_i16mf4(vbool64_t mask, vint16mf4_t dest, vint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2(vint16mf2_t dest, vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1(
@@ -228,8 +228,8 @@ vint16mf2_t test_vcompress_vm_i16mf2(vbool32_t mask, vint16mf2_t dest, vint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16m1_t test_vcompress_vm_i16m1(vint16m1_t dest, vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2(
@@ -237,8 +237,8 @@ vint16m1_t test_vcompress_vm_i16m1(vbool16_t mask, vint16m1_t dest, vint16m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16m2_t test_vcompress_vm_i16m2(vint16m2_t dest, vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4(
@@ -246,8 +246,8 @@ vint16m2_t test_vcompress_vm_i16m2(vbool8_t mask, vint16m2_t dest, vint16m2_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16m4_t test_vcompress_vm_i16m4(vint16m4_t dest, vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8(
@@ -255,8 +255,8 @@ vint16m4_t test_vcompress_vm_i16m4(vbool4_t mask, vint16m4_t dest, vint16m4_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint16m8_t test_vcompress_vm_i16m8(vint16m8_t dest, vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2(
@@ -264,8 +264,8 @@ vint16m8_t test_vcompress_vm_i16m8(vbool2_t mask, vint16m8_t dest, vint16m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2(vint32mf2_t dest, vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1(
@@ -273,8 +273,8 @@ vint32mf2_t test_vcompress_vm_i32mf2(vbool64_t mask, vint32mf2_t dest, vint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint32m1_t test_vcompress_vm_i32m1(vint32m1_t dest, vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2(
@@ -282,8 +282,8 @@ vint32m1_t test_vcompress_vm_i32m1(vbool32_t mask, vint32m1_t dest, vint32m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint32m2_t test_vcompress_vm_i32m2(vint32m2_t dest, vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4(
@@ -291,8 +291,8 @@ vint32m2_t test_vcompress_vm_i32m2(vbool16_t mask, vint32m2_t dest, vint32m2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint32m4_t test_vcompress_vm_i32m4(vint32m4_t dest, vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8(
@@ -300,8 +300,8 @@ vint32m4_t test_vcompress_vm_i32m4(vbool8_t mask, vint32m4_t dest, vint32m4_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint32m8_t test_vcompress_vm_i32m8(vint32m8_t dest, vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1(
@@ -309,8 +309,8 @@ vint32m8_t test_vcompress_vm_i32m8(vbool4_t mask, vint32m8_t dest, vint32m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint64m1_t test_vcompress_vm_i64m1(vint64m1_t dest, vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2(
@@ -318,8 +318,8 @@ vint64m1_t test_vcompress_vm_i64m1(vbool64_t mask, vint64m1_t dest, vint64m1_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint64m2_t test_vcompress_vm_i64m2(vint64m2_t dest, vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4(
@@ -327,8 +327,8 @@ vint64m2_t test_vcompress_vm_i64m2(vbool32_t mask, vint64m2_t dest, vint64m2_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint64m4_t test_vcompress_vm_i64m4(vint64m4_t dest, vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8(
@@ -336,8 +336,8 @@ vint64m4_t test_vcompress_vm_i64m4(vbool16_t mask, vint64m4_t dest, vint64m4_t s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vint64m8_t test_vcompress_vm_i64m8(vint64m8_t dest, vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8(
@@ -345,8 +345,8 @@ vint64m8_t test_vcompress_vm_i64m8(vbool8_t mask, vint64m8_t dest, vint64m8_t sr
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[DEST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8(vuint8mf8_t dest, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4(
@@ -354,8 +354,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8(vbool64_t mask, vuint8mf8_t dest, vuint8mf8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[DEST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4(vuint8mf4_t dest, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2(
@@ -363,8 +363,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4(vbool32_t mask, vuint8mf4_t dest, vuint8mf4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[DEST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2(vuint8mf2_t dest, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1(
@@ -372,8 +372,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2(vbool16_t mask, vuint8mf2_t dest, vuint8mf2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[DEST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1(vuint8m1_t dest, vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2(
@@ -381,8 +381,8 @@ vuint8m1_t test_vcompress_vm_u8m1(vbool8_t mask, vuint8m1_t dest, vuint8m1_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[DEST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2(vuint8m2_t dest, vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4(
@@ -390,8 +390,8 @@ vuint8m2_t test_vcompress_vm_u8m2(vbool4_t mask, vuint8m2_t dest, vuint8m2_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[DEST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4(vuint8m4_t dest, vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8(
@@ -399,8 +399,8 @@ vuint8m4_t test_vcompress_vm_u8m4(vbool2_t mask, vuint8m4_t dest, vuint8m4_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[DEST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8(vuint8m8_t dest, vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4(
@@ -408,8 +408,8 @@ vuint8m8_t test_vcompress_vm_u8m8(vbool1_t mask, vuint8m8_t dest, vuint8m8_t src
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[DEST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16mf4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4(vuint16mf4_t dest, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2(
@@ -417,8 +417,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4(vbool64_t mask, vuint16mf4_t dest, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[DEST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2(vuint16mf2_t dest, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1(
@@ -426,8 +426,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2(vbool32_t mask, vuint16mf2_t dest, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[DEST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1(vuint16m1_t dest, vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2(
@@ -435,8 +435,8 @@ vuint16m1_t test_vcompress_vm_u16m1(vbool16_t mask, vuint16m1_t dest, vuint16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[DEST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2(vuint16m2_t dest, vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4(
@@ -444,8 +444,8 @@ vuint16m2_t test_vcompress_vm_u16m2(vbool8_t mask, vuint16m2_t dest, vuint16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[DEST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4(vuint16m4_t dest, vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8(
@@ -453,8 +453,8 @@ vuint16m4_t test_vcompress_vm_u16m4(vbool4_t mask, vuint16m4_t dest, vuint16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[DEST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8(vuint16m8_t dest, vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2(
@@ -462,8 +462,8 @@ vuint16m8_t test_vcompress_vm_u16m8(vbool2_t mask, vuint16m8_t dest, vuint16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[DEST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32mf2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2(vuint32mf2_t dest, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1(
@@ -471,8 +471,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2(vbool64_t mask, vuint32mf2_t dest, vuint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[DEST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1(vuint32m1_t dest, vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2(
@@ -480,8 +480,8 @@ vuint32m1_t test_vcompress_vm_u32m1(vbool32_t mask, vuint32m1_t dest, vuint32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[DEST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2(vuint32m2_t dest, vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4(
@@ -489,8 +489,8 @@ vuint32m2_t test_vcompress_vm_u32m2(vbool16_t mask, vuint32m2_t dest, vuint32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[DEST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4(vuint32m4_t dest, vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8(
@@ -498,8 +498,8 @@ vuint32m4_t test_vcompress_vm_u32m4(vbool8_t mask, vuint32m4_t dest, vuint32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[DEST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8(vuint32m8_t dest, vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1(
@@ -507,8 +507,8 @@ vuint32m8_t test_vcompress_vm_u32m8(vbool4_t mask, vuint32m8_t dest, vuint32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[DEST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1(vuint64m1_t dest, vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2(
@@ -516,8 +516,8 @@ vuint64m1_t test_vcompress_vm_u64m1(vbool64_t mask, vuint64m1_t dest, vuint64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[DEST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2(vuint64m2_t dest, vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4(
@@ -525,8 +525,8 @@ vuint64m2_t test_vcompress_vm_u64m2(vbool32_t mask, vuint64m2_t dest, vuint64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[DEST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4(vuint64m4_t dest, vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8(
@@ -534,7 +534,7 @@ vuint64m4_t test_vcompress_vm_u64m4(vbool16_t mask, vuint64m4_t dest, vuint64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[DEST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8(vbool8_t mask, vuint64m8_t dest, vuint64m8_t src, size_t vl) {
-  return vcompress(mask, dest, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8(vuint64m8_t dest, vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress(dest, src, mask, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
index 35c4be499c030..135cfeb0a36b4 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vfmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1(vbool16_t mask, vfloat16m1_t op1, _Float16 o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2(vbool8_t mask, vfloat16m2_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4(vbool4_t mask, vfloat16m4_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8(vbool2_t mask, vfloat16m8_t op1, _Float16 op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1(vbool32_t mask, vfloat32m1_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2(vbool16_t mask, vfloat32m2_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4(vbool8_t mask, vfloat32m4_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8(vbool4_t mask, vfloat32m8_t op1, float op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1(vbool64_t mask, vfloat64m1_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2(vbool32_t mask, vfloat64m2_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8(
@@ -138,7 +138,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4(vbool16_t mask, vfloat64m4_t op1, double op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge(op1, op2, mask, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
index c5d3e7386c6f7..f5f4c75568b77 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/non-policy/overloaded/vmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8(
@@ -21,8 +21,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4(
@@ -30,8 +30,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8(vbool64_t mask, vint8mf8_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4(
@@ -39,8 +39,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2(
@@ -48,8 +48,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4(vbool32_t mask, vint8mf4_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2(
@@ -57,8 +57,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1(
@@ -66,8 +66,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2(vbool16_t mask, vint8mf2_t op1, int8_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1(
@@ -75,8 +75,8 @@ vint8m1_t test_vmerge_vvm_i8m1(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2(
@@ -84,8 +84,8 @@ vint8m1_t test_vmerge_vxm_i8m1(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2(
@@ -93,8 +93,8 @@ vint8m2_t test_vmerge_vvm_i8m2(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4(
@@ -102,8 +102,8 @@ vint8m2_t test_vmerge_vxm_i8m2(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4(
@@ -111,8 +111,8 @@ vint8m4_t test_vmerge_vvm_i8m4(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8(
@@ -120,8 +120,8 @@ vint8m4_t test_vmerge_vxm_i8m4(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8(
@@ -129,8 +129,8 @@ vint8m8_t test_vmerge_vvm_i8m8(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4(
@@ -138,8 +138,8 @@ vint8m8_t test_vmerge_vxm_i8m8(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4(
@@ -147,8 +147,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4(vbool64_t mask, vint16mf4_t op1, vint16mf4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2(
@@ -156,8 +156,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4(vbool64_t mask, vint16mf4_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2(
@@ -165,8 +165,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2(vbool32_t mask, vint16mf2_t op1, vint16mf2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1(
@@ -174,8 +174,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2(vbool32_t mask, vint16mf2_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1(
@@ -183,8 +183,8 @@ vint16m1_t test_vmerge_vvm_i16m1(vbool16_t mask, vint16m1_t op1, vint16m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2(
@@ -192,8 +192,8 @@ vint16m1_t test_vmerge_vxm_i16m1(vbool16_t mask, vint16m1_t op1, int16_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2(
@@ -201,8 +201,8 @@ vint16m2_t test_vmerge_vvm_i16m2(vbool8_t mask, vint16m2_t op1, vint16m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4(
@@ -210,8 +210,8 @@ vint16m2_t test_vmerge_vxm_i16m2(vbool8_t mask, vint16m2_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4(
@@ -219,8 +219,8 @@ vint16m4_t test_vmerge_vvm_i16m4(vbool4_t mask, vint16m4_t op1, vint16m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8(
@@ -228,8 +228,8 @@ vint16m4_t test_vmerge_vxm_i16m4(vbool4_t mask, vint16m4_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8(
@@ -237,8 +237,8 @@ vint16m8_t test_vmerge_vvm_i16m8(vbool2_t mask, vint16m8_t op1, vint16m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2(
@@ -246,8 +246,8 @@ vint16m8_t test_vmerge_vxm_i16m8(vbool2_t mask, vint16m8_t op1, int16_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2(
@@ -255,8 +255,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2(vbool64_t mask, vint32mf2_t op1, vint32mf2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1(
@@ -264,8 +264,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2(vbool64_t mask, vint32mf2_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1(
@@ -273,8 +273,8 @@ vint32m1_t test_vmerge_vvm_i32m1(vbool32_t mask, vint32m1_t op1, vint32m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2(
@@ -282,8 +282,8 @@ vint32m1_t test_vmerge_vxm_i32m1(vbool32_t mask, vint32m1_t op1, int32_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2(
@@ -291,8 +291,8 @@ vint32m2_t test_vmerge_vvm_i32m2(vbool16_t mask, vint32m2_t op1, vint32m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4(
@@ -300,8 +300,8 @@ vint32m2_t test_vmerge_vxm_i32m2(vbool16_t mask, vint32m2_t op1, int32_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4(
@@ -309,8 +309,8 @@ vint32m4_t test_vmerge_vvm_i32m4(vbool8_t mask, vint32m4_t op1, vint32m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8(
@@ -318,8 +318,8 @@ vint32m4_t test_vmerge_vxm_i32m4(vbool8_t mask, vint32m4_t op1, int32_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8(
@@ -327,8 +327,8 @@ vint32m8_t test_vmerge_vvm_i32m8(vbool4_t mask, vint32m8_t op1, vint32m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1(
@@ -336,8 +336,8 @@ vint32m8_t test_vmerge_vxm_i32m8(vbool4_t mask, vint32m8_t op1, int32_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1(
@@ -345,8 +345,8 @@ vint64m1_t test_vmerge_vvm_i64m1(vbool64_t mask, vint64m1_t op1, vint64m1_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2(
@@ -354,8 +354,8 @@ vint64m1_t test_vmerge_vxm_i64m1(vbool64_t mask, vint64m1_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2(
@@ -363,8 +363,8 @@ vint64m2_t test_vmerge_vvm_i64m2(vbool32_t mask, vint64m2_t op1, vint64m2_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4(
@@ -372,8 +372,8 @@ vint64m2_t test_vmerge_vxm_i64m2(vbool32_t mask, vint64m2_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4(
@@ -381,8 +381,8 @@ vint64m4_t test_vmerge_vvm_i64m4(vbool16_t mask, vint64m4_t op1, vint64m4_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8(
@@ -390,8 +390,8 @@ vint64m4_t test_vmerge_vxm_i64m4(vbool16_t mask, vint64m4_t op1, int64_t op2, si
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8(
@@ -399,8 +399,8 @@ vint64m8_t test_vmerge_vvm_i64m8(vbool8_t mask, vint64m8_t op1, vint64m8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8(
@@ -408,8 +408,8 @@ vint64m8_t test_vmerge_vxm_i64m8(vbool8_t mask, vint64m8_t op1, int64_t op2, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8(
@@ -417,8 +417,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4(
@@ -426,8 +426,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8(vbool64_t mask, vuint8mf8_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4(
@@ -435,8 +435,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2(
@@ -444,8 +444,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4(vbool32_t mask, vuint8mf4_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2(
@@ -453,8 +453,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1(
@@ -462,8 +462,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2(vbool16_t mask, vuint8mf2_t op1, uint8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1(
@@ -471,8 +471,8 @@ vuint8m1_t test_vmerge_vvm_u8m1(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2(
@@ -480,8 +480,8 @@ vuint8m1_t test_vmerge_vxm_u8m1(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2(
@@ -489,8 +489,8 @@ vuint8m2_t test_vmerge_vvm_u8m2(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4(
@@ -498,8 +498,8 @@ vuint8m2_t test_vmerge_vxm_u8m2(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4(
@@ -507,8 +507,8 @@ vuint8m4_t test_vmerge_vvm_u8m4(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8(
@@ -516,8 +516,8 @@ vuint8m4_t test_vmerge_vxm_u8m4(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8(
@@ -525,8 +525,8 @@ vuint8m8_t test_vmerge_vvm_u8m8(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4(
@@ -534,8 +534,8 @@ vuint8m8_t test_vmerge_vxm_u8m8(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4(
@@ -543,8 +543,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4(vbool64_t mask, vuint16mf4_t op1, vuint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2(
@@ -552,8 +552,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4(vbool64_t mask, vuint16mf4_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2(
@@ -561,8 +561,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2(vbool32_t mask, vuint16mf2_t op1, vuint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1(
@@ -570,8 +570,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2(vbool32_t mask, vuint16mf2_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1(
@@ -579,8 +579,8 @@ vuint16m1_t test_vmerge_vvm_u16m1(vbool16_t mask, vuint16m1_t op1, vuint16m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2(
@@ -588,8 +588,8 @@ vuint16m1_t test_vmerge_vxm_u16m1(vbool16_t mask, vuint16m1_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2(
@@ -597,8 +597,8 @@ vuint16m2_t test_vmerge_vvm_u16m2(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4(
@@ -606,8 +606,8 @@ vuint16m2_t test_vmerge_vxm_u16m2(vbool8_t mask, vuint16m2_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4(
@@ -615,8 +615,8 @@ vuint16m4_t test_vmerge_vvm_u16m4(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8(
@@ -624,8 +624,8 @@ vuint16m4_t test_vmerge_vxm_u16m4(vbool4_t mask, vuint16m4_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8(
@@ -633,8 +633,8 @@ vuint16m8_t test_vmerge_vvm_u16m8(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2(
@@ -642,8 +642,8 @@ vuint16m8_t test_vmerge_vxm_u16m8(vbool2_t mask, vuint16m8_t op1, uint16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2(
@@ -651,8 +651,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2(vbool64_t mask, vuint32mf2_t op1, vuint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1(
@@ -660,8 +660,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2(vbool64_t mask, vuint32mf2_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1(
@@ -669,8 +669,8 @@ vuint32m1_t test_vmerge_vvm_u32m1(vbool32_t mask, vuint32m1_t op1, vuint32m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2(
@@ -678,8 +678,8 @@ vuint32m1_t test_vmerge_vxm_u32m1(vbool32_t mask, vuint32m1_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2(
@@ -687,8 +687,8 @@ vuint32m2_t test_vmerge_vvm_u32m2(vbool16_t mask, vuint32m2_t op1, vuint32m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4(
@@ -696,8 +696,8 @@ vuint32m2_t test_vmerge_vxm_u32m2(vbool16_t mask, vuint32m2_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4(
@@ -705,8 +705,8 @@ vuint32m4_t test_vmerge_vvm_u32m4(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8(
@@ -714,8 +714,8 @@ vuint32m4_t test_vmerge_vxm_u32m4(vbool8_t mask, vuint32m4_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8(
@@ -723,8 +723,8 @@ vuint32m8_t test_vmerge_vvm_u32m8(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1(
@@ -732,8 +732,8 @@ vuint32m8_t test_vmerge_vxm_u32m8(vbool4_t mask, vuint32m8_t op1, uint32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1(
@@ -741,8 +741,8 @@ vuint64m1_t test_vmerge_vvm_u64m1(vbool64_t mask, vuint64m1_t op1, vuint64m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2(
@@ -750,8 +750,8 @@ vuint64m1_t test_vmerge_vxm_u64m1(vbool64_t mask, vuint64m1_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2(
@@ -759,8 +759,8 @@ vuint64m2_t test_vmerge_vvm_u64m2(vbool32_t mask, vuint64m2_t op1, vuint64m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4(
@@ -768,8 +768,8 @@ vuint64m2_t test_vmerge_vxm_u64m2(vbool32_t mask, vuint64m2_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4(
@@ -777,8 +777,8 @@ vuint64m4_t test_vmerge_vvm_u64m4(vbool16_t mask, vuint64m4_t op1, vuint64m4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8(
@@ -786,8 +786,8 @@ vuint64m4_t test_vmerge_vxm_u64m4(vbool16_t mask, vuint64m4_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8(
@@ -795,8 +795,8 @@ vuint64m8_t test_vmerge_vvm_u64m8(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4(
@@ -804,8 +804,8 @@ vuint64m8_t test_vmerge_vxm_u64m8(vbool8_t mask, vuint64m8_t op1, uint64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2(
@@ -813,8 +813,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4(vbool64_t mask, vfloat16mf4_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1(
@@ -822,8 +822,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2(vbool32_t mask, vfloat16mf2_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2(
@@ -831,8 +831,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4(
@@ -840,8 +840,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8(
@@ -849,8 +849,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2(
@@ -858,8 +858,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1(
@@ -867,8 +867,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2(vbool64_t mask, vfloat32mf2_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2(
@@ -876,8 +876,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4(
@@ -885,8 +885,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8(
@@ -894,8 +894,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1(
@@ -903,8 +903,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2(
@@ -912,8 +912,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4(
@@ -921,8 +921,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8(
@@ -930,7 +930,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge(op1, op2, mask, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
index c5988c7e5d7b9..cfaed256d2165 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vcompress.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vcompress_vm_f16mf4_tu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f16mf4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vcompress_vm_f16mf2_tu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f16mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vcompress_vm_f16m1_tu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f16m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vcompress_vm_f16m2_tu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f16m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vcompress_vm_f16m4_tu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f16m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vcompress_vm_f16m8_tu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_f16m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vcompress_vm_f32mf2_tu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f32mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vcompress_vm_f32m1_tu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f32m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vcompress_vm_f32m2_tu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f32m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vcompress_vm_f32m4_tu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f32m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vcompress_vm_f32m8_tu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f32m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return vcompress_vm_f64m1_tu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f64m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return vcompress_vm_f64m2_tu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f64m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return vcompress_vm_f64m4_tu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f64m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return vcompress_vm_f64m8_tu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f64m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) {
-  return vcompress_vm_i8mf8_tu(mask, maskedoff, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i8mf8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu(
@@ -156,8 +156,8 @@ vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) {
-  return vcompress_vm_i8mf4_tu(mask, maskedoff, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i8mf4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu(
@@ -165,8 +165,8 @@ vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) {
-  return vcompress_vm_i8mf2_tu(mask, maskedoff, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i8mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu(
@@ -174,8 +174,8 @@ vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t vl) {
-  return vcompress_vm_i8m1_tu(mask, maskedoff, src, vl);
+vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i8m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu(
@@ -183,8 +183,8 @@ vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t vl) {
-  return vcompress_vm_i8m2_tu(mask, maskedoff, src, vl);
+vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i8m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu(
@@ -192,8 +192,8 @@ vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t vl) {
-  return vcompress_vm_i8m4_tu(mask, maskedoff, src, vl);
+vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i8m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu(
@@ -201,8 +201,8 @@ vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t vl) {
-  return vcompress_vm_i8m8_tu(mask, maskedoff, src, vl);
+vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_i8m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu(
@@ -210,8 +210,8 @@ vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return vcompress_vm_i16mf4_tu(mask, maskedoff, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i16mf4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu(
@@ -219,8 +219,8 @@ vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return vcompress_vm_i16mf2_tu(mask, maskedoff, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i16mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu(
@@ -228,8 +228,8 @@ vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return vcompress_vm_i16m1_tu(mask, maskedoff, src, vl);
+vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i16m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu(
@@ -237,8 +237,8 @@ vint16m1_t test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return vcompress_vm_i16m2_tu(mask, maskedoff, src, vl);
+vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i16m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu(
@@ -246,8 +246,8 @@ vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return vcompress_vm_i16m4_tu(mask, maskedoff, src, vl);
+vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i16m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu(
@@ -255,8 +255,8 @@ vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return vcompress_vm_i16m8_tu(mask, maskedoff, src, vl);
+vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i16m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
@@ -264,8 +264,8 @@ vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return vcompress_vm_i32mf2_tu(mask, maskedoff, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i32mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu(
@@ -273,8 +273,8 @@ vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return vcompress_vm_i32m1_tu(mask, maskedoff, src, vl);
+vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i32m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu(
@@ -282,8 +282,8 @@ vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return vcompress_vm_i32m2_tu(mask, maskedoff, src, vl);
+vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i32m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu(
@@ -291,8 +291,8 @@ vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return vcompress_vm_i32m4_tu(mask, maskedoff, src, vl);
+vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i32m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu(
@@ -300,8 +300,8 @@ vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint3
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return vcompress_vm_i32m8_tu(mask, maskedoff, src, vl);
+vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i32m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu(
@@ -309,8 +309,8 @@ vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint3
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return vcompress_vm_i64m1_tu(mask, maskedoff, src, vl);
+vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i64m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu(
@@ -318,8 +318,8 @@ vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return vcompress_vm_i64m2_tu(mask, maskedoff, src, vl);
+vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i64m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu(
@@ -327,8 +327,8 @@ vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return vcompress_vm_i64m4_tu(mask, maskedoff, src, vl);
+vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i64m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu(
@@ -336,8 +336,8 @@ vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return vcompress_vm_i64m8_tu(mask, maskedoff, src, vl);
+vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i64m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu(
@@ -345,8 +345,8 @@ vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint6
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return vcompress_vm_u8mf8_tu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u8mf8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu(
@@ -354,8 +354,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return vcompress_vm_u8mf4_tu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u8mf4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu(
@@ -363,8 +363,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return vcompress_vm_u8mf2_tu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u8mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu(
@@ -372,8 +372,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) {
-  return vcompress_vm_u8m1_tu(mask, maskedoff, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u8m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu(
@@ -381,8 +381,8 @@ vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) {
-  return vcompress_vm_u8m2_tu(mask, maskedoff, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u8m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu(
@@ -390,8 +390,8 @@ vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) {
-  return vcompress_vm_u8m4_tu(mask, maskedoff, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u8m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu(
@@ -399,8 +399,8 @@ vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) {
-  return vcompress_vm_u8m8_tu(mask, maskedoff, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_u8m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu(
@@ -408,8 +408,8 @@ vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return vcompress_vm_u16mf4_tu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u16mf4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu(
@@ -417,8 +417,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return vcompress_vm_u16mf2_tu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u16mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu(
@@ -426,8 +426,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return vcompress_vm_u16m1_tu(mask, maskedoff, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u16m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu(
@@ -435,8 +435,8 @@ vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return vcompress_vm_u16m2_tu(mask, maskedoff, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u16m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu(
@@ -444,8 +444,8 @@ vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return vcompress_vm_u16m4_tu(mask, maskedoff, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u16m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu(
@@ -453,8 +453,8 @@ vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return vcompress_vm_u16m8_tu(mask, maskedoff, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u16m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
@@ -462,8 +462,8 @@ vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return vcompress_vm_u32mf2_tu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u32mf2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu(
@@ -471,8 +471,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return vcompress_vm_u32m1_tu(mask, maskedoff, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u32m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu(
@@ -480,8 +480,8 @@ vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return vcompress_vm_u32m2_tu(mask, maskedoff, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u32m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu(
@@ -489,8 +489,8 @@ vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return vcompress_vm_u32m4_tu(mask, maskedoff, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u32m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu(
@@ -498,8 +498,8 @@ vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return vcompress_vm_u32m8_tu(mask, maskedoff, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u32m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu(
@@ -507,8 +507,8 @@ vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return vcompress_vm_u64m1_tu(mask, maskedoff, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u64m1_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu(
@@ -516,8 +516,8 @@ vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return vcompress_vm_u64m2_tu(mask, maskedoff, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u64m2_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu(
@@ -525,8 +525,8 @@ vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return vcompress_vm_u64m4_tu(mask, maskedoff, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u64m4_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu(
@@ -534,8 +534,8 @@ vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return vcompress_vm_u64m8_tu(mask, maskedoff, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u64m8_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta(
@@ -543,8 +543,8 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return vcompress_vm_f16mf4_ta(mask, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f16mf4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta(
@@ -552,8 +552,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return vcompress_vm_f16mf2_ta(mask, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f16mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta(
@@ -561,8 +561,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return vcompress_vm_f16m1_ta(mask, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1_ta(vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f16m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta(
@@ -570,8 +570,8 @@ vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return vcompress_vm_f16m2_ta(mask, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2_ta(vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f16m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta(
@@ -579,8 +579,8 @@ vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return vcompress_vm_f16m4_ta(mask, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4_ta(vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f16m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta(
@@ -588,8 +588,8 @@ vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return vcompress_vm_f16m8_ta(mask, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8_ta(vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_f16m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
@@ -597,8 +597,8 @@ vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return vcompress_vm_f32mf2_ta(mask, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f32mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta(
@@ -606,8 +606,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return vcompress_vm_f32m1_ta(mask, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1_ta(vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f32m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta(
@@ -615,8 +615,8 @@ vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return vcompress_vm_f32m2_ta(mask, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2_ta(vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f32m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta(
@@ -624,8 +624,8 @@ vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return vcompress_vm_f32m4_ta(mask, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_ta(vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f32m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta(
@@ -633,8 +633,8 @@ vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return vcompress_vm_f32m8_ta(mask, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_ta(vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_f32m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta(
@@ -642,8 +642,8 @@ vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return vcompress_vm_f64m1_ta(mask, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_ta(vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_f64m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta(
@@ -651,8 +651,8 @@ vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return vcompress_vm_f64m2_ta(mask, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_ta(vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_f64m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta(
@@ -660,8 +660,8 @@ vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return vcompress_vm_f64m4_ta(mask, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_ta(vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_f64m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta(
@@ -669,8 +669,8 @@ vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return vcompress_vm_f64m8_ta(mask, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_ta(vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_f64m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta(
@@ -678,8 +678,8 @@ vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl) {
-  return vcompress_vm_i8mf8_ta(mask, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_ta(vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i8mf8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta(
@@ -687,8 +687,8 @@ vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl) {
-  return vcompress_vm_i8mf4_ta(mask, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_ta(vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i8mf4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta(
@@ -696,8 +696,8 @@ vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl) {
-  return vcompress_vm_i8mf2_ta(mask, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_ta(vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i8mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta(
@@ -705,8 +705,8 @@ vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) {
-  return vcompress_vm_i8m1_ta(mask, src, vl);
+vint8m1_t test_vcompress_vm_i8m1_ta(vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i8m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta(
@@ -714,8 +714,8 @@ vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) {
-  return vcompress_vm_i8m2_ta(mask, src, vl);
+vint8m2_t test_vcompress_vm_i8m2_ta(vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i8m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta(
@@ -723,8 +723,8 @@ vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) {
-  return vcompress_vm_i8m4_ta(mask, src, vl);
+vint8m4_t test_vcompress_vm_i8m4_ta(vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i8m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta(
@@ -732,8 +732,8 @@ vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) {
-  return vcompress_vm_i8m8_ta(mask, src, vl);
+vint8m8_t test_vcompress_vm_i8m8_ta(vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_i8m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta(
@@ -741,8 +741,8 @@ vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return vcompress_vm_i16mf4_ta(mask, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4_ta(vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i16mf4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta(
@@ -750,8 +750,8 @@ vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return vcompress_vm_i16mf2_ta(mask, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2_ta(vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i16mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta(
@@ -759,8 +759,8 @@ vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return vcompress_vm_i16m1_ta(mask, src, vl);
+vint16m1_t test_vcompress_vm_i16m1_ta(vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i16m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta(
@@ -768,8 +768,8 @@ vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return vcompress_vm_i16m2_ta(mask, src, vl);
+vint16m2_t test_vcompress_vm_i16m2_ta(vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i16m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta(
@@ -777,8 +777,8 @@ vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return vcompress_vm_i16m4_ta(mask, src, vl);
+vint16m4_t test_vcompress_vm_i16m4_ta(vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i16m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta(
@@ -786,8 +786,8 @@ vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl) {
-  return vcompress_vm_i16m8_ta(mask, src, vl);
+vint16m8_t test_vcompress_vm_i16m8_ta(vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_i16m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
@@ -795,8 +795,8 @@ vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return vcompress_vm_i32mf2_ta(mask, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i32mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta(
@@ -804,8 +804,8 @@ vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return vcompress_vm_i32m1_ta(mask, src, vl);
+vint32m1_t test_vcompress_vm_i32m1_ta(vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i32m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta(
@@ -813,8 +813,8 @@ vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return vcompress_vm_i32m2_ta(mask, src, vl);
+vint32m2_t test_vcompress_vm_i32m2_ta(vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i32m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta(
@@ -822,8 +822,8 @@ vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return vcompress_vm_i32m4_ta(mask, src, vl);
+vint32m4_t test_vcompress_vm_i32m4_ta(vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i32m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta(
@@ -831,8 +831,8 @@ vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return vcompress_vm_i32m8_ta(mask, src, vl);
+vint32m8_t test_vcompress_vm_i32m8_ta(vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_i32m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta(
@@ -840,8 +840,8 @@ vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return vcompress_vm_i64m1_ta(mask, src, vl);
+vint64m1_t test_vcompress_vm_i64m1_ta(vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_i64m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta(
@@ -849,8 +849,8 @@ vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return vcompress_vm_i64m2_ta(mask, src, vl);
+vint64m2_t test_vcompress_vm_i64m2_ta(vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_i64m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta(
@@ -858,8 +858,8 @@ vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return vcompress_vm_i64m4_ta(mask, src, vl);
+vint64m4_t test_vcompress_vm_i64m4_ta(vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_i64m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta(
@@ -867,8 +867,8 @@ vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return vcompress_vm_i64m8_ta(mask, src, vl);
+vint64m8_t test_vcompress_vm_i64m8_ta(vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_i64m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta(
@@ -876,8 +876,8 @@ vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t vl) {
-  return vcompress_vm_u8mf8_ta(mask, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8_ta(vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u8mf8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta(
@@ -885,8 +885,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t vl) {
-  return vcompress_vm_u8mf4_ta(mask, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4_ta(vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u8mf4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta(
@@ -894,8 +894,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t vl) {
-  return vcompress_vm_u8mf2_ta(mask, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2_ta(vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u8mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta(
@@ -903,8 +903,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) {
-  return vcompress_vm_u8m1_ta(mask, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1_ta(vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u8m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta(
@@ -912,8 +912,8 @@ vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) {
-  return vcompress_vm_u8m2_ta(mask, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2_ta(vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u8m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta(
@@ -921,8 +921,8 @@ vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) {
-  return vcompress_vm_u8m4_ta(mask, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4_ta(vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u8m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta(
@@ -930,8 +930,8 @@ vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) {
-  return vcompress_vm_u8m8_ta(mask, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8_ta(vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_vm_u8m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta(
@@ -939,8 +939,8 @@ vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return vcompress_vm_u16mf4_ta(mask, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4_ta(vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u16mf4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta(
@@ -948,8 +948,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, vuint16mf4_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return vcompress_vm_u16mf2_ta(mask, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2_ta(vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u16mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta(
@@ -957,8 +957,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return vcompress_vm_u16m1_ta(mask, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1_ta(vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u16m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta(
@@ -966,8 +966,8 @@ vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return vcompress_vm_u16m2_ta(mask, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2_ta(vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u16m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta(
@@ -975,8 +975,8 @@ vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return vcompress_vm_u16m4_ta(mask, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4_ta(vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u16m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta(
@@ -984,8 +984,8 @@ vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl) {
-  return vcompress_vm_u16m8_ta(mask, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8_ta(vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_vm_u16m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
@@ -993,8 +993,8 @@ vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return vcompress_vm_u32mf2_ta(mask, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u32mf2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta(
@@ -1002,8 +1002,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return vcompress_vm_u32m1_ta(mask, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1_ta(vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u32m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta(
@@ -1011,8 +1011,8 @@ vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return vcompress_vm_u32m2_ta(mask, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2_ta(vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u32m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta(
@@ -1020,8 +1020,8 @@ vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return vcompress_vm_u32m4_ta(mask, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4_ta(vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u32m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta(
@@ -1029,8 +1029,8 @@ vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return vcompress_vm_u32m8_ta(mask, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8_ta(vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_vm_u32m8_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta(
@@ -1038,8 +1038,8 @@ vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return vcompress_vm_u64m1_ta(mask, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1_ta(vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_vm_u64m1_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta(
@@ -1047,8 +1047,8 @@ vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return vcompress_vm_u64m2_ta(mask, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2_ta(vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_vm_u64m2_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta(
@@ -1056,8 +1056,8 @@ vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return vcompress_vm_u64m4_ta(mask, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4_ta(vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_vm_u64m4_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta(
@@ -1065,7 +1065,7 @@ vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8_ta(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return vcompress_vm_u64m8_ta(mask, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8_ta(vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_vm_u64m8_ta(src, mask, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c
index 9b361ace483fe..c86a69e0ce4d2 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vfmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf4_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf2_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge_vfm_f16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32mf2_tu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf4_ta(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta(
@@ -156,8 +156,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Flo
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16mf2_ta(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta(
@@ -165,8 +165,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Flo
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m1_ta(mask, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta(
@@ -174,8 +174,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m2_ta(mask, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta(
@@ -183,8 +183,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m4_ta(mask, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta(
@@ -192,8 +192,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_vfm_f16m8_ta(mask, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge_vfm_f16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
@@ -201,8 +201,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32mf2_ta(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta(
@@ -210,8 +210,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, floa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m1_ta(mask, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta(
@@ -219,8 +219,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m2_ta(mask, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta(
@@ -228,8 +228,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m4_ta(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta(
@@ -237,8 +237,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge_vfm_f32m8_ta(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge_vfm_f32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta(
@@ -246,8 +246,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m1_ta(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge_vfm_f64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta(
@@ -255,8 +255,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m2_ta(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge_vfm_f64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta(
@@ -264,8 +264,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m4_ta(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge_vfm_f64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta(
@@ -273,7 +273,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge_vfm_f64m8_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge_vfm_f64m8_ta(op1, op2, mask, vl);
 }
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
index 44b7ca4dd45d4..a716445114c3a 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/non-overloaded/vmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_i8mf8_tu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i8mf8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu(
@@ -21,8 +21,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf8_tu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i8mf8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu(
@@ -30,8 +30,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_i8mf4_tu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i8mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu(
@@ -39,8 +39,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf4_tu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i8mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu(
@@ -48,8 +48,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_i8mf2_tu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i8mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu(
@@ -57,8 +57,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf2_tu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i8mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu(
@@ -66,8 +66,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge_vvm_i8m1_tu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i8m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu(
@@ -75,8 +75,8 @@ vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m1_tu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i8m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu(
@@ -84,8 +84,8 @@ vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge_vvm_i8m2_tu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i8m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu(
@@ -93,8 +93,8 @@ vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m2_tu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i8m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu(
@@ -102,8 +102,8 @@ vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge_vvm_i8m4_tu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i8m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu(
@@ -111,8 +111,8 @@ vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m4_tu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i8m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu(
@@ -120,8 +120,8 @@ vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge_vvm_i8m8_tu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_i8m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu(
@@ -129,8 +129,8 @@ vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m8_tu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_i8m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu(
@@ -138,8 +138,8 @@ vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_i16mf4_tu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu(
@@ -147,8 +147,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf4_tu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu(
@@ -156,8 +156,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_i16mf2_tu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu(
@@ -165,8 +165,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf2_tu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu(
@@ -174,8 +174,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge_vvm_i16m1_tu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu(
@@ -183,8 +183,8 @@ vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m1_tu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu(
@@ -192,8 +192,8 @@ vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge_vvm_i16m2_tu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu(
@@ -201,8 +201,8 @@ vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m2_tu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu(
@@ -210,8 +210,8 @@ vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge_vvm_i16m4_tu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu(
@@ -219,8 +219,8 @@ vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m4_tu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu(
@@ -228,8 +228,8 @@ vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge_vvm_i16m8_tu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu(
@@ -237,8 +237,8 @@ vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m8_tu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
@@ -246,8 +246,8 @@ vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_i32mf2_tu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
@@ -255,8 +255,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32mf2_tu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu(
@@ -264,8 +264,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge_vvm_i32m1_tu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu(
@@ -273,8 +273,8 @@ vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m1_tu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu(
@@ -282,8 +282,8 @@ vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge_vvm_i32m2_tu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu(
@@ -291,8 +291,8 @@ vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m2_tu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu(
@@ -300,8 +300,8 @@ vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge_vvm_i32m4_tu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu(
@@ -309,8 +309,8 @@ vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m4_tu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu(
@@ -318,8 +318,8 @@ vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge_vvm_i32m8_tu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu(
@@ -327,8 +327,8 @@ vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m8_tu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu(
@@ -336,8 +336,8 @@ vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge_vvm_i64m1_tu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu(
@@ -345,8 +345,8 @@ vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m1_tu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu(
@@ -354,8 +354,8 @@ vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge_vvm_i64m2_tu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu(
@@ -363,8 +363,8 @@ vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m2_tu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu(
@@ -372,8 +372,8 @@ vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge_vvm_i64m4_tu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu(
@@ -381,8 +381,8 @@ vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m4_tu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu(
@@ -390,8 +390,8 @@ vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge_vvm_i64m8_tu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu(
@@ -399,8 +399,8 @@ vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m8_tu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu(
@@ -408,8 +408,8 @@ vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_u8mf8_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u8mf8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu(
@@ -417,8 +417,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf8_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u8mf8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu(
@@ -426,8 +426,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_u8mf4_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u8mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu(
@@ -435,8 +435,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf4_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u8mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu(
@@ -444,8 +444,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_u8mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u8mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu(
@@ -453,8 +453,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u8mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu(
@@ -462,8 +462,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge_vvm_u8m1_tu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u8m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu(
@@ -471,8 +471,8 @@ vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m1_tu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u8m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu(
@@ -480,8 +480,8 @@ vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge_vvm_u8m2_tu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u8m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu(
@@ -489,8 +489,8 @@ vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m2_tu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u8m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu(
@@ -498,8 +498,8 @@ vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge_vvm_u8m4_tu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u8m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu(
@@ -507,8 +507,8 @@ vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m4_tu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u8m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu(
@@ -516,8 +516,8 @@ vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge_vvm_u8m8_tu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_u8m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu(
@@ -525,8 +525,8 @@ vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m8_tu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_u8m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu(
@@ -534,8 +534,8 @@ vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_u16mf4_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu(
@@ -543,8 +543,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf4_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu(
@@ -552,8 +552,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_u16mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu(
@@ -561,8 +561,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu(
@@ -570,8 +570,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge_vvm_u16m1_tu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu(
@@ -579,8 +579,8 @@ vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m1_tu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu(
@@ -588,8 +588,8 @@ vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge_vvm_u16m2_tu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu(
@@ -597,8 +597,8 @@ vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m2_tu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu(
@@ -606,8 +606,8 @@ vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge_vvm_u16m4_tu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu(
@@ -615,8 +615,8 @@ vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m4_tu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu(
@@ -624,8 +624,8 @@ vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge_vvm_u16m8_tu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu(
@@ -633,8 +633,8 @@ vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m8_tu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
@@ -642,8 +642,8 @@ vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_u32mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
@@ -651,8 +651,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32mf2_tu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu(
@@ -660,8 +660,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge_vvm_u32m1_tu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu(
@@ -669,8 +669,8 @@ vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m1_tu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu(
@@ -678,8 +678,8 @@ vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge_vvm_u32m2_tu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu(
@@ -687,8 +687,8 @@ vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m2_tu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu(
@@ -696,8 +696,8 @@ vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge_vvm_u32m4_tu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu(
@@ -705,8 +705,8 @@ vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m4_tu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu(
@@ -714,8 +714,8 @@ vuint32m4_t test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge_vvm_u32m8_tu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu(
@@ -723,8 +723,8 @@ vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m8_tu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu(
@@ -732,8 +732,8 @@ vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge_vvm_u64m1_tu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu(
@@ -741,8 +741,8 @@ vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m1_tu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu(
@@ -750,8 +750,8 @@ vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge_vvm_u64m2_tu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu(
@@ -759,8 +759,8 @@ vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m2_tu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu(
@@ -768,8 +768,8 @@ vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge_vvm_u64m4_tu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu(
@@ -777,8 +777,8 @@ vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m4_tu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu(
@@ -786,8 +786,8 @@ vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge_vvm_u64m8_tu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu(
@@ -795,8 +795,8 @@ vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m8_tu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta(
@@ -804,8 +804,8 @@ vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_i8mf8_ta(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i8mf8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta(
@@ -813,8 +813,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf8_ta(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8_ta(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i8mf8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta(
@@ -822,8 +822,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_i8mf4_ta(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i8mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta(
@@ -831,8 +831,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf4_ta(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4_ta(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i8mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta(
@@ -840,8 +840,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_i8mf2_ta(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i8mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta(
@@ -849,8 +849,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8mf2_ta(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2_ta(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i8mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta(
@@ -858,8 +858,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge_vvm_i8m1_ta(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1_ta(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i8m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta(
@@ -867,8 +867,8 @@ vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m1_ta(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1_ta(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i8m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta(
@@ -876,8 +876,8 @@ vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge_vvm_i8m2_ta(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2_ta(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i8m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta(
@@ -885,8 +885,8 @@ vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m2_ta(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2_ta(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i8m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta(
@@ -894,8 +894,8 @@ vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge_vvm_i8m4_ta(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4_ta(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i8m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta(
@@ -903,8 +903,8 @@ vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m4_ta(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4_ta(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i8m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta(
@@ -912,8 +912,8 @@ vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge_vvm_i8m8_ta(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8_ta(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_i8m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta(
@@ -921,8 +921,8 @@ vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge_vxm_i8m8_ta(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8_ta(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_i8m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta(
@@ -930,8 +930,8 @@ vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_i16mf4_ta(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta(
@@ -939,8 +939,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf4_ta(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4_ta(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta(
@@ -948,8 +948,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_i16mf2_ta(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta(
@@ -957,8 +957,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16mf2_ta(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2_ta(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta(
@@ -966,8 +966,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge_vvm_i16m1_ta(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1_ta(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta(
@@ -975,8 +975,8 @@ vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m1_ta(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1_ta(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta(
@@ -984,8 +984,8 @@ vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge_vvm_i16m2_ta(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2_ta(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta(
@@ -993,8 +993,8 @@ vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m2_ta(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2_ta(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta(
@@ -1002,8 +1002,8 @@ vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge_vvm_i16m4_ta(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4_ta(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta(
@@ -1011,8 +1011,8 @@ vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m4_ta(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4_ta(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta(
@@ -1020,8 +1020,8 @@ vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge_vvm_i16m8_ta(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8_ta(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_i16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta(
@@ -1029,8 +1029,8 @@ vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge_vxm_i16m8_ta(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8_ta(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_i16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
@@ -1038,8 +1038,8 @@ vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_i32mf2_ta(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
@@ -1047,8 +1047,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32mf2_ta(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta(
@@ -1056,8 +1056,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge_vvm_i32m1_ta(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1_ta(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta(
@@ -1065,8 +1065,8 @@ vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, vint32m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m1_ta(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1_ta(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta(
@@ -1074,8 +1074,8 @@ vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge_vvm_i32m2_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2_ta(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta(
@@ -1083,8 +1083,8 @@ vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m2_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2_ta(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta(
@@ -1092,8 +1092,8 @@ vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge_vvm_i32m4_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4_ta(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta(
@@ -1101,8 +1101,8 @@ vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m4_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4_ta(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta(
@@ -1110,8 +1110,8 @@ vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge_vvm_i32m8_ta(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8_ta(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_i32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta(
@@ -1119,8 +1119,8 @@ vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge_vxm_i32m8_ta(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8_ta(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_i32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta(
@@ -1128,8 +1128,8 @@ vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge_vvm_i64m1_ta(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1_ta(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_i64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta(
@@ -1137,8 +1137,8 @@ vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m1_ta(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1_ta(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_i64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta(
@@ -1146,8 +1146,8 @@ vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge_vvm_i64m2_ta(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2_ta(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_i64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta(
@@ -1155,8 +1155,8 @@ vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m2_ta(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2_ta(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_i64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta(
@@ -1164,8 +1164,8 @@ vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge_vvm_i64m4_ta(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4_ta(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_i64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta(
@@ -1173,8 +1173,8 @@ vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m4_ta(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4_ta(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_i64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta(
@@ -1182,8 +1182,8 @@ vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge_vvm_i64m8_ta(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8_ta(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_i64m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta(
@@ -1191,8 +1191,8 @@ vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge_vxm_i64m8_ta(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8_ta(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_i64m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta(
@@ -1200,8 +1200,8 @@ vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge_vvm_u8mf8_ta(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u8mf8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta(
@@ -1209,8 +1209,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf8_ta(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u8mf8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta(
@@ -1218,8 +1218,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge_vvm_u8mf4_ta(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u8mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta(
@@ -1227,8 +1227,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf4_ta(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u8mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta(
@@ -1236,8 +1236,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge_vvm_u8mf2_ta(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u8mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta(
@@ -1245,8 +1245,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8mf2_ta(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u8mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta(
@@ -1254,8 +1254,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge_vvm_u8m1_ta(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u8m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta(
@@ -1263,8 +1263,8 @@ vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m1_ta(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_ta(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u8m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta(
@@ -1272,8 +1272,8 @@ vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge_vvm_u8m2_ta(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u8m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta(
@@ -1281,8 +1281,8 @@ vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m2_ta(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_ta(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u8m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta(
@@ -1290,8 +1290,8 @@ vuint8m2_t test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge_vvm_u8m4_ta(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u8m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta(
@@ -1299,8 +1299,8 @@ vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m4_ta(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_ta(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u8m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta(
@@ -1308,8 +1308,8 @@ vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge_vvm_u8m8_ta(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vvm_u8m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta(
@@ -1317,8 +1317,8 @@ vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_vxm_u8m8_ta(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_ta(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_vxm_u8m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta(
@@ -1326,8 +1326,8 @@ vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge_vvm_u16mf4_ta(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta(
@@ -1335,8 +1335,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf4_ta(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta(
@@ -1344,8 +1344,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge_vvm_u16mf2_ta(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta(
@@ -1353,8 +1353,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16mf2_ta(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta(
@@ -1362,8 +1362,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge_vvm_u16m1_ta(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta(
@@ -1371,8 +1371,8 @@ vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m1_ta(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_ta(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta(
@@ -1380,8 +1380,8 @@ vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge_vvm_u16m2_ta(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta(
@@ -1389,8 +1389,8 @@ vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m2_ta(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_ta(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta(
@@ -1398,8 +1398,8 @@ vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge_vvm_u16m4_ta(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta(
@@ -1407,8 +1407,8 @@ vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m4_ta(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_ta(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta(
@@ -1416,8 +1416,8 @@ vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge_vvm_u16m8_ta(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_u16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta(
@@ -1425,8 +1425,8 @@ vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge_vxm_u16m8_ta(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_ta(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vxm_u16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
@@ -1434,8 +1434,8 @@ vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge_vvm_u32mf2_ta(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
@@ -1443,8 +1443,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32mf2_ta(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta(
@@ -1452,8 +1452,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge_vvm_u32m1_ta(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta(
@@ -1461,8 +1461,8 @@ vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m1_ta(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_ta(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta(
@@ -1470,8 +1470,8 @@ vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge_vvm_u32m2_ta(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta(
@@ -1479,8 +1479,8 @@ vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m2_ta(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_ta(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta(
@@ -1488,8 +1488,8 @@ vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge_vvm_u32m4_ta(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta(
@@ -1497,8 +1497,8 @@ vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m4_ta(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_ta(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta(
@@ -1506,8 +1506,8 @@ vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge_vvm_u32m8_ta(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_u32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta(
@@ -1515,8 +1515,8 @@ vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge_vxm_u32m8_ta(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_ta(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vxm_u32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta(
@@ -1524,8 +1524,8 @@ vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge_vvm_u64m1_ta(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_u64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta(
@@ -1533,8 +1533,8 @@ vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m1_ta(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_ta(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vxm_u64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta(
@@ -1542,8 +1542,8 @@ vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge_vvm_u64m2_ta(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_u64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta(
@@ -1551,8 +1551,8 @@ vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m2_ta(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_ta(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vxm_u64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta(
@@ -1560,8 +1560,8 @@ vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge_vvm_u64m4_ta(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_u64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta(
@@ -1569,8 +1569,8 @@ vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m4_ta(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_ta(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vxm_u64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta(
@@ -1578,8 +1578,8 @@ vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge_vvm_u64m8_ta(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_u64m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta(
@@ -1587,8 +1587,8 @@ vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge_vxm_u64m8_ta(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_ta(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vxm_u64m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu(
@@ -1596,8 +1596,8 @@ vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge_vvm_f16mf4_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f16mf4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu(
@@ -1605,8 +1605,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge_vvm_f16mf2_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f16mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu(
@@ -1614,8 +1614,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge_vvm_f16m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f16m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu(
@@ -1623,8 +1623,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge_vvm_f16m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f16m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu(
@@ -1632,8 +1632,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge_vvm_f16m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f16m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu(
@@ -1641,8 +1641,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge_vvm_f16m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_f16m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
@@ -1650,8 +1650,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_vvm_f32mf2_tu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f32mf2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu(
@@ -1659,8 +1659,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge_vvm_f32m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f32m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu(
@@ -1668,8 +1668,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge_vvm_f32m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f32m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu(
@@ -1677,8 +1677,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge_vvm_f32m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f32m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu(
@@ -1686,8 +1686,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge_vvm_f32m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f32m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu(
@@ -1695,8 +1695,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge_vvm_f64m1_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f64m1_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu(
@@ -1704,8 +1704,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge_vvm_f64m2_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f64m2_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu(
@@ -1713,8 +1713,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge_vvm_f64m4_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f64m4_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu(
@@ -1722,8 +1722,8 @@ vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge_vvm_f64m8_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f64m8_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta(
@@ -1731,8 +1731,8 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge_vvm_f16mf4_ta(mask, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f16mf4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta(
@@ -1740,8 +1740,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge_vvm_f16mf2_ta(mask, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f16mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta(
@@ -1749,8 +1749,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge_vvm_f16m1_ta(mask, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_ta(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f16m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta(
@@ -1758,8 +1758,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge_vvm_f16m2_ta(mask, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_ta(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f16m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta(
@@ -1767,8 +1767,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge_vvm_f16m4_ta(mask, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_ta(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f16m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta(
@@ -1776,8 +1776,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge_vvm_f16m8_ta(mask, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_ta(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_vvm_f16m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
@@ -1785,8 +1785,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_vvm_f32mf2_ta(mask, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f32mf2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta(
@@ -1794,8 +1794,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge_vvm_f32m1_ta(mask, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_ta(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f32m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta(
@@ -1803,8 +1803,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge_vvm_f32m2_ta(mask, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_ta(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f32m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta(
@@ -1812,8 +1812,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge_vvm_f32m4_ta(mask, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_ta(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f32m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta(
@@ -1821,8 +1821,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge_vvm_f32m8_ta(mask, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_ta(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_vvm_f32m8_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta(
@@ -1830,8 +1830,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge_vvm_f64m1_ta(mask, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_ta(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_vvm_f64m1_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta(
@@ -1839,8 +1839,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge_vvm_f64m2_ta(mask, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_ta(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_vvm_f64m2_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta(
@@ -1848,8 +1848,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge_vvm_f64m4_ta(mask, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_ta(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_vvm_f64m4_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta(
@@ -1857,7 +1857,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge_vvm_f64m8_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_ta(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_vvm_f64m8_ta(op1, op2, mask, vl);
 }
 

diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
index a39834e0b441a..dc229ad21aefc 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vcompress.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_tu(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_tu(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_tu(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vcompress_vm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_tu(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vcompress_vm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_tu(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vcompress_vm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_tu(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vcompress_vm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_tu(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedof
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_tu(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vcompress_vm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_tu(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vcompress_vm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_tu(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vcompress_vm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_tu(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vcompress_vm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_tu(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vcompress_vm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_tu(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vcompress_vm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_tu(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vcompress_vm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_tu(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vcompress_vm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_tu(
@@ -156,8 +156,8 @@ vint8mf8_t test_vcompress_vm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_tu(
@@ -165,8 +165,8 @@ vint8mf4_t test_vcompress_vm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_tu(
@@ -174,8 +174,8 @@ vint8mf2_t test_vcompress_vm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8m1_t test_vcompress_vm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_tu(
@@ -183,8 +183,8 @@ vint8m1_t test_vcompress_vm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8m2_t test_vcompress_vm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_tu(
@@ -192,8 +192,8 @@ vint8m2_t test_vcompress_vm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8m4_t test_vcompress_vm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_tu(
@@ -201,8 +201,8 @@ vint8m4_t test_vcompress_vm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint8m8_t test_vcompress_vm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_tu(
@@ -210,8 +210,8 @@ vint8m8_t test_vcompress_vm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_tu(
@@ -219,8 +219,8 @@ vint16mf4_t test_vcompress_vm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_tu(
@@ -228,8 +228,8 @@ vint16mf2_t test_vcompress_vm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16m1_t test_vcompress_vm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_tu(
@@ -237,8 +237,8 @@ vint16m1_t test_vcompress_vm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16m2_t test_vcompress_vm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_tu(
@@ -246,8 +246,8 @@ vint16m2_t test_vcompress_vm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16m4_t test_vcompress_vm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_tu(
@@ -255,8 +255,8 @@ vint16m4_t test_vcompress_vm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint16m8_t test_vcompress_vm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_tu(
@@ -264,8 +264,8 @@ vint16m8_t test_vcompress_vm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_tu(
@@ -273,8 +273,8 @@ vint32mf2_t test_vcompress_vm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint32m1_t test_vcompress_vm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_tu(
@@ -282,8 +282,8 @@ vint32m1_t test_vcompress_vm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint32m2_t test_vcompress_vm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_tu(
@@ -291,8 +291,8 @@ vint32m2_t test_vcompress_vm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint32m4_t test_vcompress_vm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_tu(
@@ -300,8 +300,8 @@ vint32m4_t test_vcompress_vm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint3
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint32m8_t test_vcompress_vm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_tu(
@@ -309,8 +309,8 @@ vint32m8_t test_vcompress_vm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint3
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint64m1_t test_vcompress_vm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_tu(
@@ -318,8 +318,8 @@ vint64m1_t test_vcompress_vm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint64m2_t test_vcompress_vm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_tu(
@@ -327,8 +327,8 @@ vint64m2_t test_vcompress_vm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint64m4_t test_vcompress_vm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_tu(
@@ -336,8 +336,8 @@ vint64m4_t test_vcompress_vm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vint64m8_t test_vcompress_vm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_tu(
@@ -345,8 +345,8 @@ vint64m8_t test_vcompress_vm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint6
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_tu(
@@ -354,8 +354,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_tu(
@@ -363,8 +363,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_tu(
@@ -372,8 +372,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_tu(
@@ -381,8 +381,8 @@ vuint8m1_t test_vcompress_vm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_tu(
@@ -390,8 +390,8 @@ vuint8m2_t test_vcompress_vm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_tu(
@@ -399,8 +399,8 @@ vuint8m4_t test_vcompress_vm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_tu(
@@ -408,8 +408,8 @@ vuint8m8_t test_vcompress_vm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_tu(
@@ -417,8 +417,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_tu(
@@ -426,8 +426,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_tu(
@@ -435,8 +435,8 @@ vuint16m1_t test_vcompress_vm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_tu(
@@ -444,8 +444,8 @@ vuint16m2_t test_vcompress_vm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_tu(
@@ -453,8 +453,8 @@ vuint16m4_t test_vcompress_vm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_tu(
@@ -462,8 +462,8 @@ vuint16m8_t test_vcompress_vm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_tu(
@@ -471,8 +471,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_tu(
@@ -480,8 +480,8 @@ vuint32m1_t test_vcompress_vm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_tu(
@@ -489,8 +489,8 @@ vuint32m2_t test_vcompress_vm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_tu(
@@ -498,8 +498,8 @@ vuint32m4_t test_vcompress_vm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_tu(
@@ -507,8 +507,8 @@ vuint32m8_t test_vcompress_vm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_tu(
@@ -516,8 +516,8 @@ vuint64m1_t test_vcompress_vm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_tu(
@@ -525,8 +525,8 @@ vuint64m2_t test_vcompress_vm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_tu(
@@ -534,8 +534,8 @@ vuint64m4_t test_vcompress_vm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vu
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t src, size_t vl) {
-  return vcompress_tu(mask, maskedoff, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_tu(maskedoff, src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf4_ta(
@@ -543,8 +543,8 @@ vuint64m8_t test_vcompress_vm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vui
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vcompress.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vfloat16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16mf2_ta(
@@ -552,8 +552,8 @@ vfloat16mf4_t test_vcompress_vm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vcompress.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vfloat16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m1_ta(
@@ -561,8 +561,8 @@ vfloat16mf2_t test_vcompress_vm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vcompress.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16m1_t test_vcompress_vm_f16m1_ta(vfloat16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m2_ta(
@@ -570,8 +570,8 @@ vfloat16m1_t test_vcompress_vm_f16m1_ta(vbool16_t mask, vfloat16m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vcompress.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16m2_t test_vcompress_vm_f16m2_ta(vfloat16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m4_ta(
@@ -579,8 +579,8 @@ vfloat16m2_t test_vcompress_vm_f16m2_ta(vbool8_t mask, vfloat16m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vcompress.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16m4_t test_vcompress_vm_f16m4_ta(vfloat16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f16m8_ta(
@@ -588,8 +588,8 @@ vfloat16m4_t test_vcompress_vm_f16m4_ta(vbool4_t mask, vfloat16m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vcompress.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat16m8_t test_vcompress_vm_f16m8_ta(vfloat16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32mf2_ta(
@@ -597,8 +597,8 @@ vfloat16m8_t test_vcompress_vm_f16m8_ta(vbool2_t mask, vfloat16m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vcompress.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vfloat32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m1_ta(
@@ -606,8 +606,8 @@ vfloat32mf2_t test_vcompress_vm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t src, siz
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vcompress.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat32m1_t test_vcompress_vm_f32m1_ta(vfloat32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m2_ta(
@@ -615,8 +615,8 @@ vfloat32m1_t test_vcompress_vm_f32m1_ta(vbool32_t mask, vfloat32m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vcompress.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat32m2_t test_vcompress_vm_f32m2_ta(vfloat32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m4_ta(
@@ -624,8 +624,8 @@ vfloat32m2_t test_vcompress_vm_f32m2_ta(vbool16_t mask, vfloat32m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vcompress.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat32m4_t test_vcompress_vm_f32m4_ta(vfloat32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f32m8_ta(
@@ -633,8 +633,8 @@ vfloat32m4_t test_vcompress_vm_f32m4_ta(vbool8_t mask, vfloat32m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vcompress.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat32m8_t test_vcompress_vm_f32m8_ta(vfloat32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m1_ta(
@@ -642,8 +642,8 @@ vfloat32m8_t test_vcompress_vm_f32m8_ta(vbool4_t mask, vfloat32m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vcompress.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat64m1_t test_vcompress_vm_f64m1_ta(vfloat64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m2_ta(
@@ -651,8 +651,8 @@ vfloat64m1_t test_vcompress_vm_f64m1_ta(vbool64_t mask, vfloat64m1_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vcompress.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat64m2_t test_vcompress_vm_f64m2_ta(vfloat64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m4_ta(
@@ -660,8 +660,8 @@ vfloat64m2_t test_vcompress_vm_f64m2_ta(vbool32_t mask, vfloat64m2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vcompress.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat64m4_t test_vcompress_vm_f64m4_ta(vfloat64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_f64m8_ta(
@@ -669,8 +669,8 @@ vfloat64m4_t test_vcompress_vm_f64m4_ta(vbool16_t mask, vfloat64m4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vcompress.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vfloat64m8_t test_vcompress_vm_f64m8_ta(vfloat64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf8_ta(
@@ -678,8 +678,8 @@ vfloat64m8_t test_vcompress_vm_f64m8_ta(vbool8_t mask, vfloat64m8_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8mf8_t test_vcompress_vm_i8mf8_ta(vint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf4_ta(
@@ -687,8 +687,8 @@ vint8mf8_t test_vcompress_vm_i8mf8_ta(vbool64_t mask, vint8mf8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8mf4_t test_vcompress_vm_i8mf4_ta(vint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8mf2_ta(
@@ -696,8 +696,8 @@ vint8mf4_t test_vcompress_vm_i8mf4_ta(vbool32_t mask, vint8mf4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8mf2_t test_vcompress_vm_i8mf2_ta(vint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m1_ta(
@@ -705,8 +705,8 @@ vint8mf2_t test_vcompress_vm_i8mf2_ta(vbool16_t mask, vint8mf2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8m1_t test_vcompress_vm_i8m1_ta(vint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m2_ta(
@@ -714,8 +714,8 @@ vint8m1_t test_vcompress_vm_i8m1_ta(vbool8_t mask, vint8m1_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8m2_t test_vcompress_vm_i8m2_ta(vint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m4_ta(
@@ -723,8 +723,8 @@ vint8m2_t test_vcompress_vm_i8m2_ta(vbool4_t mask, vint8m2_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8m4_t test_vcompress_vm_i8m4_ta(vint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i8m8_ta(
@@ -732,8 +732,8 @@ vint8m4_t test_vcompress_vm_i8m4_ta(vbool2_t mask, vint8m4_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint8m8_t test_vcompress_vm_i8m8_ta(vint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf4_ta(
@@ -741,8 +741,8 @@ vint8m8_t test_vcompress_vm_i8m8_ta(vbool1_t mask, vint8m8_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16mf4_t test_vcompress_vm_i16mf4_ta(vint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16mf2_ta(
@@ -750,8 +750,8 @@ vint16mf4_t test_vcompress_vm_i16mf4_ta(vbool64_t mask, vint16mf4_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16mf2_t test_vcompress_vm_i16mf2_ta(vint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m1_ta(
@@ -759,8 +759,8 @@ vint16mf2_t test_vcompress_vm_i16mf2_ta(vbool32_t mask, vint16mf2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16m1_t test_vcompress_vm_i16m1_ta(vint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m2_ta(
@@ -768,8 +768,8 @@ vint16m1_t test_vcompress_vm_i16m1_ta(vbool16_t mask, vint16m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16m2_t test_vcompress_vm_i16m2_ta(vint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m4_ta(
@@ -777,8 +777,8 @@ vint16m2_t test_vcompress_vm_i16m2_ta(vbool8_t mask, vint16m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16m4_t test_vcompress_vm_i16m4_ta(vint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i16m8_ta(
@@ -786,8 +786,8 @@ vint16m4_t test_vcompress_vm_i16m4_ta(vbool4_t mask, vint16m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint16m8_t test_vcompress_vm_i16m8_ta(vint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32mf2_ta(
@@ -795,8 +795,8 @@ vint16m8_t test_vcompress_vm_i16m8_ta(vbool2_t mask, vint16m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint32mf2_t test_vcompress_vm_i32mf2_ta(vint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m1_ta(
@@ -804,8 +804,8 @@ vint32mf2_t test_vcompress_vm_i32mf2_ta(vbool64_t mask, vint32mf2_t src, size_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint32m1_t test_vcompress_vm_i32m1_ta(vint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m2_ta(
@@ -813,8 +813,8 @@ vint32m1_t test_vcompress_vm_i32m1_ta(vbool32_t mask, vint32m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint32m2_t test_vcompress_vm_i32m2_ta(vint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m4_ta(
@@ -822,8 +822,8 @@ vint32m2_t test_vcompress_vm_i32m2_ta(vbool16_t mask, vint32m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint32m4_t test_vcompress_vm_i32m4_ta(vint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i32m8_ta(
@@ -831,8 +831,8 @@ vint32m4_t test_vcompress_vm_i32m4_ta(vbool8_t mask, vint32m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint32m8_t test_vcompress_vm_i32m8_ta(vint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m1_ta(
@@ -840,8 +840,8 @@ vint32m8_t test_vcompress_vm_i32m8_ta(vbool4_t mask, vint32m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint64m1_t test_vcompress_vm_i64m1_ta(vint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m2_ta(
@@ -849,8 +849,8 @@ vint64m1_t test_vcompress_vm_i64m1_ta(vbool64_t mask, vint64m1_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint64m2_t test_vcompress_vm_i64m2_ta(vint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m4_ta(
@@ -858,8 +858,8 @@ vint64m2_t test_vcompress_vm_i64m2_ta(vbool32_t mask, vint64m2_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint64m4_t test_vcompress_vm_i64m4_ta(vint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_i64m8_ta(
@@ -867,8 +867,8 @@ vint64m4_t test_vcompress_vm_i64m4_ta(vbool16_t mask, vint64m4_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vint64m8_t test_vcompress_vm_i64m8_ta(vint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf8_ta(
@@ -876,8 +876,8 @@ vint64m8_t test_vcompress_vm_i64m8_ta(vbool8_t mask, vint64m8_t src, size_t vl)
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vcompress.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8mf8_t test_vcompress_vm_u8mf8_ta(vuint8mf8_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf4_ta(
@@ -885,8 +885,8 @@ vuint8mf8_t test_vcompress_vm_u8mf8_ta(vbool64_t mask, vuint8mf8_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vcompress.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8mf4_t test_vcompress_vm_u8mf4_ta(vuint8mf4_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8mf2_ta(
@@ -894,8 +894,8 @@ vuint8mf4_t test_vcompress_vm_u8mf4_ta(vbool32_t mask, vuint8mf4_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vcompress.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8mf2_t test_vcompress_vm_u8mf2_ta(vuint8mf2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m1_ta(
@@ -903,8 +903,8 @@ vuint8mf2_t test_vcompress_vm_u8mf2_ta(vbool16_t mask, vuint8mf2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vcompress.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8m1_t test_vcompress_vm_u8m1_ta(vuint8m1_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m2_ta(
@@ -912,8 +912,8 @@ vuint8m1_t test_vcompress_vm_u8m1_ta(vbool8_t mask, vuint8m1_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vcompress.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8m2_t test_vcompress_vm_u8m2_ta(vuint8m2_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m4_ta(
@@ -921,8 +921,8 @@ vuint8m2_t test_vcompress_vm_u8m2_ta(vbool4_t mask, vuint8m2_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vcompress.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8m4_t test_vcompress_vm_u8m4_ta(vuint8m4_t src, vbool2_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u8m8_ta(
@@ -930,8 +930,8 @@ vuint8m4_t test_vcompress_vm_u8m4_ta(vbool2_t mask, vuint8m4_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vcompress.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint8m8_t test_vcompress_vm_u8m8_ta(vuint8m8_t src, vbool1_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf4_ta(
@@ -939,8 +939,8 @@ vuint8m8_t test_vcompress_vm_u8m8_ta(vbool1_t mask, vuint8m8_t src, size_t vl) {
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vcompress.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, vuint16mf4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16mf4_t test_vcompress_vm_u16mf4_ta(vuint16mf4_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16mf2_ta(
@@ -948,8 +948,8 @@ vuint16mf4_t test_vcompress_vm_u16mf4_ta(vbool64_t mask, vuint16mf4_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vcompress.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16mf2_t test_vcompress_vm_u16mf2_ta(vuint16mf2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m1_ta(
@@ -957,8 +957,8 @@ vuint16mf2_t test_vcompress_vm_u16mf2_ta(vbool32_t mask, vuint16mf2_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vcompress.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16m1_t test_vcompress_vm_u16m1_ta(vuint16m1_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m2_ta(
@@ -966,8 +966,8 @@ vuint16m1_t test_vcompress_vm_u16m1_ta(vbool16_t mask, vuint16m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vcompress.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16m2_t test_vcompress_vm_u16m2_ta(vuint16m2_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m4_ta(
@@ -975,8 +975,8 @@ vuint16m2_t test_vcompress_vm_u16m2_ta(vbool8_t mask, vuint16m2_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vcompress.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16m4_t test_vcompress_vm_u16m4_ta(vuint16m4_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u16m8_ta(
@@ -984,8 +984,8 @@ vuint16m4_t test_vcompress_vm_u16m4_ta(vbool4_t mask, vuint16m4_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vcompress.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint16m8_t test_vcompress_vm_u16m8_ta(vuint16m8_t src, vbool2_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32mf2_ta(
@@ -993,8 +993,8 @@ vuint16m8_t test_vcompress_vm_u16m8_ta(vbool2_t mask, vuint16m8_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vcompress.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint32mf2_t test_vcompress_vm_u32mf2_ta(vuint32mf2_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m1_ta(
@@ -1002,8 +1002,8 @@ vuint32mf2_t test_vcompress_vm_u32mf2_ta(vbool64_t mask, vuint32mf2_t src, size_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vcompress.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint32m1_t test_vcompress_vm_u32m1_ta(vuint32m1_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m2_ta(
@@ -1011,8 +1011,8 @@ vuint32m1_t test_vcompress_vm_u32m1_ta(vbool32_t mask, vuint32m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vcompress.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint32m2_t test_vcompress_vm_u32m2_ta(vuint32m2_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m4_ta(
@@ -1020,8 +1020,8 @@ vuint32m2_t test_vcompress_vm_u32m2_ta(vbool16_t mask, vuint32m2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vcompress.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint32m4_t test_vcompress_vm_u32m4_ta(vuint32m4_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u32m8_ta(
@@ -1029,8 +1029,8 @@ vuint32m4_t test_vcompress_vm_u32m4_ta(vbool8_t mask, vuint32m4_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vcompress.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint32m8_t test_vcompress_vm_u32m8_ta(vuint32m8_t src, vbool4_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m1_ta(
@@ -1038,8 +1038,8 @@ vuint32m8_t test_vcompress_vm_u32m8_ta(vbool4_t mask, vuint32m8_t src, size_t vl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vcompress.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint64m1_t test_vcompress_vm_u64m1_ta(vuint64m1_t src, vbool64_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m2_ta(
@@ -1047,8 +1047,8 @@ vuint64m1_t test_vcompress_vm_u64m1_ta(vbool64_t mask, vuint64m1_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vcompress.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint64m2_t test_vcompress_vm_u64m2_ta(vuint64m2_t src, vbool32_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m4_ta(
@@ -1056,8 +1056,8 @@ vuint64m2_t test_vcompress_vm_u64m2_ta(vbool32_t mask, vuint64m2_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vcompress.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint64m4_t test_vcompress_vm_u64m4_ta(vuint64m4_t src, vbool16_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vcompress_vm_u64m8_ta(
@@ -1065,7 +1065,7 @@ vuint64m4_t test_vcompress_vm_u64m4_ta(vbool16_t mask, vuint64m4_t src, size_t v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vcompress.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vcompress_vm_u64m8_ta(vbool8_t mask, vuint64m8_t src, size_t vl) {
-  return vcompress_ta(mask, src, vl);
+vuint64m8_t test_vcompress_vm_u64m8_ta(vuint64m8_t src, vbool8_t mask, size_t vl) {
+  return vcompress_ta(src, mask, vl);
 }
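
A minimal usage sketch of the updated operand order, assuming the overloaded
policy intrinsics exercised by the tests above (the wrapper function and
variable names here are hypothetical, for illustration only; they are not part
of the diff):

  #include <riscv_vector.h>

  // Keep the elements of `src` selected by `mask`, with the tail taken from
  // `maskedoff` (tail-undisturbed, _tu policy). Data operands come first and
  // the selector mask now sits just before vl.
  vint32m1_t keep_selected_tu(vint32m1_t maskedoff, vint32m1_t src,
                              vbool32_t mask, size_t vl) {
    // Previously: vcompress_tu(mask, maskedoff, src, vl);
    return vcompress_tu(maskedoff, src, mask, vl);
  }

  // Tail-agnostic (_ta) variant: no passthru operand, same mask placement.
  vint32m1_t keep_selected_ta(vint32m1_t src, vbool32_t mask, size_t vl) {
    // Previously: vcompress_ta(mask, src, vl);
    return vcompress_ta(src, mask, vl);
  }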
 

diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
index 998b9c316de2c..265c2ddd3d856 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vfmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_tu(
@@ -21,8 +21,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_tu(
@@ -30,8 +30,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_tu(
@@ -39,8 +39,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_tu(
@@ -48,8 +48,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_tu(
@@ -57,8 +57,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_tu(
@@ -66,8 +66,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_tu(
@@ -75,8 +75,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_tu(
@@ -84,8 +84,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_tu(
@@ -93,8 +93,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_tu(
@@ -102,8 +102,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_tu(
@@ -111,8 +111,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_tu(
@@ -120,8 +120,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_tu(
@@ -129,8 +129,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_tu(
@@ -138,8 +138,8 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf4_ta(
@@ -147,8 +147,8 @@ vfloat64m8_t test_vfmerge_vfm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vfmerge.nxv1f16.f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vfloat16mf4_t op1, _Float16 op2, vbool64_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16mf2_ta(
@@ -156,8 +156,8 @@ vfloat16mf4_t test_vfmerge_vfm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, _Flo
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vfmerge.nxv2f16.f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vfloat16mf2_t op1, _Float16 op2, vbool32_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m1_ta(
@@ -165,8 +165,8 @@ vfloat16mf2_t test_vfmerge_vfm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, _Flo
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vfmerge.nxv4f16.f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vfloat16m1_t op1, _Float16 op2, vbool16_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m2_ta(
@@ -174,8 +174,8 @@ vfloat16m1_t test_vfmerge_vfm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, _Float1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vfmerge.nxv8f16.f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vfloat16m2_t op1, _Float16 op2, vbool8_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m4_ta(
@@ -183,8 +183,8 @@ vfloat16m2_t test_vfmerge_vfm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vfmerge.nxv16f16.f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vfloat16m4_t op1, _Float16 op2, vbool4_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f16m8_ta(
@@ -192,8 +192,8 @@ vfloat16m4_t test_vfmerge_vfm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vfmerge.nxv32f16.f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], half [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16 op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vfloat16m8_t op1, _Float16 op2, vbool2_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32mf2_ta(
@@ -201,8 +201,8 @@ vfloat16m8_t test_vfmerge_vfm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, _Float16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vfmerge.nxv1f32.f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, float op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vfloat32mf2_t op1, float op2, vbool64_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m1_ta(
@@ -210,8 +210,8 @@ vfloat32mf2_t test_vfmerge_vfm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, floa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vfmerge.nxv2f32.f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vfloat32m1_t op1, float op2, vbool32_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m2_ta(
@@ -219,8 +219,8 @@ vfloat32m1_t test_vfmerge_vfm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vfmerge.nxv4f32.f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vfloat32m2_t op1, float op2, vbool16_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m4_ta(
@@ -228,8 +228,8 @@ vfloat32m2_t test_vfmerge_vfm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, float o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vfmerge.nxv8f32.f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vfloat32m4_t op1, float op2, vbool8_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f32m8_ta(
@@ -237,8 +237,8 @@ vfloat32m4_t test_vfmerge_vfm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, float op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vfmerge.nxv16f32.f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], float [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vfloat32m8_t op1, float op2, vbool4_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m1_ta(
@@ -246,8 +246,8 @@ vfloat32m8_t test_vfmerge_vfm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, float op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vfmerge.nxv1f64.f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vfloat64m1_t op1, double op2, vbool64_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m2_ta(
@@ -255,8 +255,8 @@ vfloat64m1_t test_vfmerge_vfm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vfmerge.nxv2f64.f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vfloat64m2_t op1, double op2, vbool32_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m4_ta(
@@ -264,8 +264,8 @@ vfloat64m2_t test_vfmerge_vfm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vfmerge.nxv4f64.f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vfloat64m4_t op1, double op2, vbool16_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vfmerge_vfm_f64m8_ta(
@@ -273,7 +273,7 @@ vfloat64m4_t test_vfmerge_vfm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, double
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vfmerge.nxv8f64.f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], double [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, double op2, size_t vl) {
-  return vfmerge_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vfmerge_vfm_f64m8_ta(vfloat64m8_t op1, double op2, vbool8_t mask, size_t vl) {
+  return vfmerge_ta(op1, op2, mask, vl);
 }
 

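For readers skimming the test churn above, the following is a minimal caller-side sketch (not part of the patch itself) of how a tail-undisturbed vfmerge call looks with the updated operand order, mirroring the test_vfmerge_vfm_f32m1_tu case: the mask/selector now follows the data operands instead of leading them. It assumes <riscv_vector.h> with the policy-suffixed overloads available and a target compiled with the V extension.

    // Sketch only: new operand order (passthru, op1, op2, selector, vl).
    #include <riscv_vector.h>

    vfloat32m1_t blend_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, float op2,
                          vbool32_t mask, size_t vl) {
      // Previously: vfmerge_tu(mask, maskedoff, op1, op2, vl);
      return vfmerge_tu(maskedoff, op1, op2, mask, vl);
    }
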
diff  --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
index a13d95f8cf657..5226663e0b0fa 100644
--- a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/policy/overloaded/vmerge.c
@@ -12,8 +12,8 @@
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_tu(
@@ -21,8 +21,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8_tu(vint8mf8_t maskedoff, vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_tu(
@@ -30,8 +30,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8_tu(vbool64_t mask, vint8mf8_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_tu(
@@ -39,8 +39,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4_tu(vint8mf4_t maskedoff, vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_tu(
@@ -48,8 +48,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4_tu(vbool32_t mask, vint8mf4_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_tu(
@@ -57,8 +57,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2_tu(vint8mf2_t maskedoff, vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_tu(
@@ -66,8 +66,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2_tu(vbool16_t mask, vint8mf2_t maskedoff, vint8m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_tu(
@@ -75,8 +75,8 @@ vint8m1_t test_vmerge_vvm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1_tu(vint8m1_t maskedoff, vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_tu(
@@ -84,8 +84,8 @@ vint8m1_t test_vmerge_vxm_i8m1_tu(vbool8_t mask, vint8m1_t maskedoff, vint8m1_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_tu(
@@ -93,8 +93,8 @@ vint8m2_t test_vmerge_vvm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2_tu(vint8m2_t maskedoff, vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_tu(
@@ -102,8 +102,8 @@ vint8m2_t test_vmerge_vxm_i8m2_tu(vbool4_t mask, vint8m2_t maskedoff, vint8m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_tu(
@@ -111,8 +111,8 @@ vint8m4_t test_vmerge_vvm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4_tu(vint8m4_t maskedoff, vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_tu(
@@ -120,8 +120,8 @@ vint8m4_t test_vmerge_vxm_i8m4_tu(vbool2_t mask, vint8m4_t maskedoff, vint8m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_tu(
@@ -129,8 +129,8 @@ vint8m8_t test_vmerge_vvm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8_tu(vint8m8_t maskedoff, vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_tu(
@@ -138,8 +138,8 @@ vint8m8_t test_vmerge_vxm_i8m8_tu(vbool1_t mask, vint8m8_t maskedoff, vint8m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_tu(
@@ -147,8 +147,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4_tu(vint16mf4_t maskedoff, vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_tu(
@@ -156,8 +156,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4_tu(vbool64_t mask, vint16mf4_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_tu(
@@ -165,8 +165,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2_tu(vint16mf2_t maskedoff, vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_tu(
@@ -174,8 +174,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2_tu(vbool32_t mask, vint16mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_tu(
@@ -183,8 +183,8 @@ vint16m1_t test_vmerge_vvm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1_tu(vint16m1_t maskedoff, vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_tu(
@@ -192,8 +192,8 @@ vint16m1_t test_vmerge_vxm_i16m1_tu(vbool16_t mask, vint16m1_t maskedoff, vint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_tu(
@@ -201,8 +201,8 @@ vint16m2_t test_vmerge_vvm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2_tu(vint16m2_t maskedoff, vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_tu(
@@ -210,8 +210,8 @@ vint16m2_t test_vmerge_vxm_i16m2_tu(vbool8_t mask, vint16m2_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_tu(
@@ -219,8 +219,8 @@ vint16m4_t test_vmerge_vvm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4_tu(vint16m4_t maskedoff, vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_tu(
@@ -228,8 +228,8 @@ vint16m4_t test_vmerge_vxm_i16m4_tu(vbool4_t mask, vint16m4_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_tu(
@@ -237,8 +237,8 @@ vint16m8_t test_vmerge_vvm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8_tu(vint16m8_t maskedoff, vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_tu(
@@ -246,8 +246,8 @@ vint16m8_t test_vmerge_vxm_i16m8_tu(vbool2_t mask, vint16m8_t maskedoff, vint16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_tu(
@@ -255,8 +255,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_tu(vint32mf2_t maskedoff, vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_tu(
@@ -264,8 +264,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2_tu(vbool64_t mask, vint32mf2_t maskedoff, vin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_tu(
@@ -273,8 +273,8 @@ vint32m1_t test_vmerge_vvm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1_tu(vint32m1_t maskedoff, vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_tu(
@@ -282,8 +282,8 @@ vint32m1_t test_vmerge_vxm_i32m1_tu(vbool32_t mask, vint32m1_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_tu(
@@ -291,8 +291,8 @@ vint32m2_t test_vmerge_vvm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2_tu(vint32m2_t maskedoff, vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_tu(
@@ -300,8 +300,8 @@ vint32m2_t test_vmerge_vxm_i32m2_tu(vbool16_t mask, vint32m2_t maskedoff, vint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_tu(
@@ -309,8 +309,8 @@ vint32m4_t test_vmerge_vvm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4_tu(vint32m4_t maskedoff, vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_tu(
@@ -318,8 +318,8 @@ vint32m4_t test_vmerge_vxm_i32m4_tu(vbool8_t mask, vint32m4_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_tu(
@@ -327,8 +327,8 @@ vint32m8_t test_vmerge_vvm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8_tu(vint32m8_t maskedoff, vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_tu(
@@ -336,8 +336,8 @@ vint32m8_t test_vmerge_vxm_i32m8_tu(vbool4_t mask, vint32m8_t maskedoff, vint32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_tu(
@@ -345,8 +345,8 @@ vint64m1_t test_vmerge_vvm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1_tu(vint64m1_t maskedoff, vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_tu(
@@ -354,8 +354,8 @@ vint64m1_t test_vmerge_vxm_i64m1_tu(vbool64_t mask, vint64m1_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_tu(
@@ -363,8 +363,8 @@ vint64m2_t test_vmerge_vvm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2_tu(vint64m2_t maskedoff, vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_tu(
@@ -372,8 +372,8 @@ vint64m2_t test_vmerge_vxm_i64m2_tu(vbool32_t mask, vint64m2_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_tu(
@@ -381,8 +381,8 @@ vint64m4_t test_vmerge_vvm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4_tu(vint64m4_t maskedoff, vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_tu(
@@ -390,8 +390,8 @@ vint64m4_t test_vmerge_vxm_i64m4_tu(vbool16_t mask, vint64m4_t maskedoff, vint64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_tu(
@@ -399,8 +399,8 @@ vint64m8_t test_vmerge_vvm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8_tu(vint64m8_t maskedoff, vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_tu(
@@ -408,8 +408,8 @@ vint64m8_t test_vmerge_vxm_i64m8_tu(vbool8_t mask, vint64m8_t maskedoff, vint64m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_tu(
@@ -417,8 +417,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vuint8mf8_t maskedoff, vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_tu(
@@ -426,8 +426,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_tu(vbool64_t mask, vuint8mf8_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_tu(
@@ -435,8 +435,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vuint8mf4_t maskedoff, vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_tu(
@@ -444,8 +444,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_tu(vbool32_t mask, vuint8mf4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_tu(
@@ -453,8 +453,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vuint8mf2_t maskedoff, vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_tu(
@@ -462,8 +462,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_tu(vbool16_t mask, vuint8mf2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_tu(
@@ -471,8 +471,8 @@ vuint8m1_t test_vmerge_vvm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_tu(vuint8m1_t maskedoff, vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_tu(
@@ -480,8 +480,8 @@ vuint8m1_t test_vmerge_vxm_u8m1_tu(vbool8_t mask, vuint8m1_t maskedoff, vuint8m1
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_tu(
@@ -489,8 +489,8 @@ vuint8m2_t test_vmerge_vvm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_tu(vuint8m2_t maskedoff, vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_tu(
@@ -498,8 +498,8 @@ vuint8m2_t test_vmerge_vxm_u8m2_tu(vbool4_t mask, vuint8m2_t maskedoff, vuint8m2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_tu(
@@ -507,8 +507,8 @@ vuint8m4_t test_vmerge_vvm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_tu(vuint8m4_t maskedoff, vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_tu(
@@ -516,8 +516,8 @@ vuint8m4_t test_vmerge_vxm_u8m4_tu(vbool2_t mask, vuint8m4_t maskedoff, vuint8m4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_tu(
@@ -525,8 +525,8 @@ vuint8m8_t test_vmerge_vvm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> [[MASKEDOFF:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_tu(vuint8m8_t maskedoff, vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_tu(
@@ -534,8 +534,8 @@ vuint8m8_t test_vmerge_vxm_u8m8_tu(vbool1_t mask, vuint8m8_t maskedoff, vuint8m8
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_tu(
@@ -543,8 +543,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vuint16mf4_t maskedoff, vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_tu(
@@ -552,8 +552,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_tu(vbool64_t mask, vuint16mf4_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_tu(
@@ -561,8 +561,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vuint16mf2_t maskedoff, vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_tu(
@@ -570,8 +570,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_tu(vbool32_t mask, vuint16mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_tu(
@@ -579,8 +579,8 @@ vuint16m1_t test_vmerge_vvm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_tu(vuint16m1_t maskedoff, vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_tu(
@@ -588,8 +588,8 @@ vuint16m1_t test_vmerge_vxm_u16m1_tu(vbool16_t mask, vuint16m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_tu(
@@ -597,8 +597,8 @@ vuint16m2_t test_vmerge_vvm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_tu(vuint16m2_t maskedoff, vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_tu(
@@ -606,8 +606,8 @@ vuint16m2_t test_vmerge_vxm_u16m2_tu(vbool8_t mask, vuint16m2_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_tu(
@@ -615,8 +615,8 @@ vuint16m4_t test_vmerge_vvm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_tu(vuint16m4_t maskedoff, vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_tu(
@@ -624,8 +624,8 @@ vuint16m4_t test_vmerge_vxm_u16m4_tu(vbool4_t mask, vuint16m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_tu(
@@ -633,8 +633,8 @@ vuint16m8_t test_vmerge_vvm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_tu(vuint16m8_t maskedoff, vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_tu(
@@ -642,8 +642,8 @@ vuint16m8_t test_vmerge_vxm_u16m8_tu(vbool2_t mask, vuint16m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_tu(
@@ -651,8 +651,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vuint32mf2_t maskedoff, vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_tu(
@@ -660,8 +660,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_tu(vbool64_t mask, vuint32mf2_t maskedoff, v
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_tu(
@@ -669,8 +669,8 @@ vuint32m1_t test_vmerge_vvm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_tu(vuint32m1_t maskedoff, vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_tu(
@@ -678,8 +678,8 @@ vuint32m1_t test_vmerge_vxm_u32m1_tu(vbool32_t mask, vuint32m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_tu(
@@ -687,8 +687,8 @@ vuint32m2_t test_vmerge_vvm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_tu(vuint32m2_t maskedoff, vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_tu(
@@ -696,8 +696,8 @@ vuint32m2_t test_vmerge_vxm_u32m2_tu(vbool16_t mask, vuint32m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_tu(
@@ -705,8 +705,8 @@ vuint32m4_t test_vmerge_vvm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_tu(vuint32m4_t maskedoff, vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_tu(
@@ -714,8 +714,8 @@ vuint32m4_t test_vmerge_vxm_u32m4_tu(vbool8_t mask, vuint32m4_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_tu(
@@ -723,8 +723,8 @@ vuint32m8_t test_vmerge_vvm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_tu(vuint32m8_t maskedoff, vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_tu(
@@ -732,8 +732,8 @@ vuint32m8_t test_vmerge_vxm_u32m8_tu(vbool4_t mask, vuint32m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_tu(
@@ -741,8 +741,8 @@ vuint64m1_t test_vmerge_vvm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_tu(vuint64m1_t maskedoff, vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_tu(
@@ -750,8 +750,8 @@ vuint64m1_t test_vmerge_vxm_u64m1_tu(vbool64_t mask, vuint64m1_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_tu(
@@ -759,8 +759,8 @@ vuint64m2_t test_vmerge_vvm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_tu(vuint64m2_t maskedoff, vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_tu(
@@ -768,8 +768,8 @@ vuint64m2_t test_vmerge_vxm_u64m2_tu(vbool32_t mask, vuint64m2_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_tu(
@@ -777,8 +777,8 @@ vuint64m4_t test_vmerge_vvm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_tu(vuint64m4_t maskedoff, vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_tu(
@@ -786,8 +786,8 @@ vuint64m4_t test_vmerge_vxm_u64m4_tu(vbool16_t mask, vuint64m4_t maskedoff, vuin
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_tu(
@@ -795,8 +795,8 @@ vuint64m8_t test_vmerge_vvm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_tu(vuint64m8_t maskedoff, vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf8_ta(
@@ -804,8 +804,8 @@ vuint64m8_t test_vmerge_vxm_u64m8_tu(vbool8_t mask, vuint64m8_t maskedoff, vuint
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vvm_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf8_ta(
@@ -813,8 +813,8 @@ vint8mf8_t test_vmerge_vvm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, vint8mf8_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf8_t test_vmerge_vxm_i8mf8_ta(vint8mf8_t op1, int8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf4_ta(
@@ -822,8 +822,8 @@ vint8mf8_t test_vmerge_vxm_i8mf8_ta(vbool64_t mask, vint8mf8_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vvm_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf4_ta(
@@ -831,8 +831,8 @@ vint8mf4_t test_vmerge_vvm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, vint8mf4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf4_t test_vmerge_vxm_i8mf4_ta(vint8mf4_t op1, int8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8mf2_ta(
@@ -840,8 +840,8 @@ vint8mf4_t test_vmerge_vxm_i8mf4_ta(vbool32_t mask, vint8mf4_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vvm_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8mf2_ta(
@@ -849,8 +849,8 @@ vint8mf2_t test_vmerge_vvm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, vint8mf2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8mf2_t test_vmerge_vxm_i8mf2_ta(vint8mf2_t op1, int8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m1_ta(
@@ -858,8 +858,8 @@ vint8mf2_t test_vmerge_vxm_i8mf2_ta(vbool16_t mask, vint8mf2_t op1, int8_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vvm_i8m1_ta(vint8m1_t op1, vint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m1_ta(
@@ -867,8 +867,8 @@ vint8m1_t test_vmerge_vvm_i8m1_ta(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m1_t test_vmerge_vxm_i8m1_ta(vint8m1_t op1, int8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m2_ta(
@@ -876,8 +876,8 @@ vint8m1_t test_vmerge_vxm_i8m1_ta(vbool8_t mask, vint8m1_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vvm_i8m2_ta(vint8m2_t op1, vint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m2_ta(
@@ -885,8 +885,8 @@ vint8m2_t test_vmerge_vvm_i8m2_ta(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m2_t test_vmerge_vxm_i8m2_ta(vint8m2_t op1, int8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m4_ta(
@@ -894,8 +894,8 @@ vint8m2_t test_vmerge_vxm_i8m2_ta(vbool4_t mask, vint8m2_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vvm_i8m4_ta(vint8m4_t op1, vint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m4_ta(
@@ -903,8 +903,8 @@ vint8m4_t test_vmerge_vvm_i8m4_ta(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m4_t test_vmerge_vxm_i8m4_ta(vint8m4_t op1, int8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i8m8_ta(
@@ -912,8 +912,8 @@ vint8m4_t test_vmerge_vxm_i8m4_ta(vbool2_t mask, vint8m4_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vvm_i8m8_ta(vint8m8_t op1, vint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i8m8_ta(
@@ -921,8 +921,8 @@ vint8m8_t test_vmerge_vvm_i8m8_ta(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint8m8_t test_vmerge_vxm_i8m8_ta(vint8m8_t op1, int8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf4_ta(
@@ -930,8 +930,8 @@ vint8m8_t test_vmerge_vxm_i8m8_ta(vbool1_t mask, vint8m8_t op1, int8_t op2, size
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vvm_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf4_ta(
@@ -939,8 +939,8 @@ vint16mf4_t test_vmerge_vvm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, vint16mf4
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16mf4_t test_vmerge_vxm_i16mf4_ta(vint16mf4_t op1, int16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16mf2_ta(
@@ -948,8 +948,8 @@ vint16mf4_t test_vmerge_vxm_i16mf4_ta(vbool64_t mask, vint16mf4_t op1, int16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vvm_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16mf2_ta(
@@ -957,8 +957,8 @@ vint16mf2_t test_vmerge_vvm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, vint16mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16mf2_t test_vmerge_vxm_i16mf2_ta(vint16mf2_t op1, int16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m1_ta(
@@ -966,8 +966,8 @@ vint16mf2_t test_vmerge_vxm_i16mf2_ta(vbool32_t mask, vint16mf2_t op1, int16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vvm_i16m1_ta(vint16m1_t op1, vint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m1_ta(
@@ -975,8 +975,8 @@ vint16m1_t test_vmerge_vvm_i16m1_ta(vbool16_t mask, vint16m1_t op1, vint16m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m1_t test_vmerge_vxm_i16m1_ta(vint16m1_t op1, int16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m2_ta(
@@ -984,8 +984,8 @@ vint16m1_t test_vmerge_vxm_i16m1_ta(vbool16_t mask, vint16m1_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vvm_i16m2_ta(vint16m2_t op1, vint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m2_ta(
@@ -993,8 +993,8 @@ vint16m2_t test_vmerge_vvm_i16m2_ta(vbool8_t mask, vint16m2_t op1, vint16m2_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m2_t test_vmerge_vxm_i16m2_ta(vint16m2_t op1, int16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m4_ta(
@@ -1002,8 +1002,8 @@ vint16m2_t test_vmerge_vxm_i16m2_ta(vbool8_t mask, vint16m2_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vvm_i16m4_ta(vint16m4_t op1, vint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m4_ta(
@@ -1011,8 +1011,8 @@ vint16m4_t test_vmerge_vvm_i16m4_ta(vbool4_t mask, vint16m4_t op1, vint16m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m4_t test_vmerge_vxm_i16m4_ta(vint16m4_t op1, int16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i16m8_ta(
@@ -1020,8 +1020,8 @@ vint16m4_t test_vmerge_vxm_i16m4_ta(vbool4_t mask, vint16m4_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vvm_i16m8_ta(vint16m8_t op1, vint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i16m8_ta(
@@ -1029,8 +1029,8 @@ vint16m8_t test_vmerge_vvm_i16m8_ta(vbool2_t mask, vint16m8_t op1, vint16m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint16m8_t test_vmerge_vxm_i16m8_ta(vint16m8_t op1, int16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32mf2_ta(
@@ -1038,8 +1038,8 @@ vint16m8_t test_vmerge_vxm_i16m8_ta(vbool2_t mask, vint16m8_t op1, int16_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vvm_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32mf2_ta(
@@ -1047,8 +1047,8 @@ vint32mf2_t test_vmerge_vvm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, vint32mf2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32mf2_t test_vmerge_vxm_i32mf2_ta(vint32mf2_t op1, int32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m1_ta(
@@ -1056,8 +1056,8 @@ vint32mf2_t test_vmerge_vxm_i32mf2_ta(vbool64_t mask, vint32mf2_t op1, int32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vvm_i32m1_ta(vint32m1_t op1, vint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m1_ta(
@@ -1065,8 +1065,8 @@ vint32m1_t test_vmerge_vvm_i32m1_ta(vbool32_t mask, vint32m1_t op1, vint32m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m1_t test_vmerge_vxm_i32m1_ta(vint32m1_t op1, int32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m2_ta(
@@ -1074,8 +1074,8 @@ vint32m1_t test_vmerge_vxm_i32m1_ta(vbool32_t mask, vint32m1_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vvm_i32m2_ta(vint32m2_t op1, vint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m2_ta(
@@ -1083,8 +1083,8 @@ vint32m2_t test_vmerge_vvm_i32m2_ta(vbool16_t mask, vint32m2_t op1, vint32m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m2_t test_vmerge_vxm_i32m2_ta(vint32m2_t op1, int32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m4_ta(
@@ -1092,8 +1092,8 @@ vint32m2_t test_vmerge_vxm_i32m2_ta(vbool16_t mask, vint32m2_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vvm_i32m4_ta(vint32m4_t op1, vint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m4_ta(
@@ -1101,8 +1101,8 @@ vint32m4_t test_vmerge_vvm_i32m4_ta(vbool8_t mask, vint32m4_t op1, vint32m4_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m4_t test_vmerge_vxm_i32m4_ta(vint32m4_t op1, int32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i32m8_ta(
@@ -1110,8 +1110,8 @@ vint32m4_t test_vmerge_vxm_i32m4_ta(vbool8_t mask, vint32m4_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vvm_i32m8_ta(vint32m8_t op1, vint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i32m8_ta(
@@ -1119,8 +1119,8 @@ vint32m8_t test_vmerge_vvm_i32m8_ta(vbool4_t mask, vint32m8_t op1, vint32m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint32m8_t test_vmerge_vxm_i32m8_ta(vint32m8_t op1, int32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m1_ta(
@@ -1128,8 +1128,8 @@ vint32m8_t test_vmerge_vxm_i32m8_ta(vbool4_t mask, vint32m8_t op1, int32_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vvm_i64m1_ta(vint64m1_t op1, vint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m1_ta(
@@ -1137,8 +1137,8 @@ vint64m1_t test_vmerge_vvm_i64m1_ta(vbool64_t mask, vint64m1_t op1, vint64m1_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m1_t test_vmerge_vxm_i64m1_ta(vint64m1_t op1, int64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m2_ta(
@@ -1146,8 +1146,8 @@ vint64m1_t test_vmerge_vxm_i64m1_ta(vbool64_t mask, vint64m1_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vvm_i64m2_ta(vint64m2_t op1, vint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m2_ta(
@@ -1155,8 +1155,8 @@ vint64m2_t test_vmerge_vvm_i64m2_ta(vbool32_t mask, vint64m2_t op1, vint64m2_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m2_t test_vmerge_vxm_i64m2_ta(vint64m2_t op1, int64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m4_ta(
@@ -1164,8 +1164,8 @@ vint64m2_t test_vmerge_vxm_i64m2_ta(vbool32_t mask, vint64m2_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vvm_i64m4_ta(vint64m4_t op1, vint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m4_ta(
@@ -1173,8 +1173,8 @@ vint64m4_t test_vmerge_vvm_i64m4_ta(vbool16_t mask, vint64m4_t op1, vint64m4_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m4_t test_vmerge_vxm_i64m4_ta(vint64m4_t op1, int64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_i64m8_ta(
@@ -1182,8 +1182,8 @@ vint64m4_t test_vmerge_vxm_i64m4_ta(vbool16_t mask, vint64m4_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vvm_i64m8_ta(vint64m8_t op1, vint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_i64m8_ta(
@@ -1191,8 +1191,8 @@ vint64m8_t test_vmerge_vvm_i64m8_ta(vbool8_t mask, vint64m8_t op1, vint64m8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vint64m8_t test_vmerge_vxm_i64m8_ta(vint64m8_t op1, int64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf8_ta(
@@ -1200,8 +1200,8 @@ vint64m8_t test_vmerge_vxm_i64m8_ta(vbool8_t mask, vint64m8_t op1, int64_t op2,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf8_ta(
@@ -1209,8 +1209,8 @@ vuint8mf8_t test_vmerge_vvm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmerge.nxv1i8.i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
 //
-vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf4_ta(
@@ -1218,8 +1218,8 @@ vuint8mf8_t test_vmerge_vxm_u8mf8_ta(vbool64_t mask, vuint8mf8_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf4_ta(
@@ -1227,8 +1227,8 @@ vuint8mf4_t test_vmerge_vvm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmerge.nxv2i8.i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
 //
-vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8mf2_ta(
@@ -1236,8 +1236,8 @@ vuint8mf4_t test_vmerge_vxm_u8mf4_ta(vbool32_t mask, vuint8mf4_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8mf2_ta(
@@ -1245,8 +1245,8 @@ vuint8mf2_t test_vmerge_vvm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmerge.nxv4i8.i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
 //
-vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m1_ta(
@@ -1254,8 +1254,8 @@ vuint8mf2_t test_vmerge_vxm_u8mf2_ta(vbool16_t mask, vuint8mf2_t op1, uint8_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vvm_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m1_ta(
@@ -1263,8 +1263,8 @@ vuint8m1_t test_vmerge_vvm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmerge.nxv8i8.i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
 //
-vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m1_t test_vmerge_vxm_u8m1_ta(vuint8m1_t op1, uint8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m2_ta(
@@ -1272,8 +1272,8 @@ vuint8m1_t test_vmerge_vxm_u8m1_ta(vbool8_t mask, vuint8m1_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vvm_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m2_ta(
@@ -1281,8 +1281,8 @@ vuint8m2_t test_vmerge_vvm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmerge.nxv16i8.i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
 //
-vuint8m2_t test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m2_t test_vmerge_vxm_u8m2_ta(vuint8m2_t op1, uint8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m4_ta(
@@ -1290,8 +1290,8 @@ vuint8m2_t test_vmerge_vxm_u8m2_ta(vbool4_t mask, vuint8m2_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vvm_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m4_ta(
@@ -1299,8 +1299,8 @@ vuint8m4_t test_vmerge_vvm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmerge.nxv32i8.i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
 //
-vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m4_t test_vmerge_vxm_u8m4_ta(vuint8m4_t op1, uint8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u8m8_ta(
@@ -1308,8 +1308,8 @@ vuint8m4_t test_vmerge_vxm_u8m4_ta(vbool2_t mask, vuint8m4_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vvm_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u8m8_ta(
@@ -1317,8 +1317,8 @@ vuint8m8_t test_vmerge_vvm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmerge.nxv64i8.i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
 //
-vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint8m8_t test_vmerge_vxm_u8m8_ta(vuint8m8_t op1, uint8_t op2, vbool1_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf4_ta(
@@ -1326,8 +1326,8 @@ vuint8m8_t test_vmerge_vxm_u8m8_ta(vbool1_t mask, vuint8m8_t op1, uint8_t op2, s
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf4_ta(
@@ -1335,8 +1335,8 @@ vuint16mf4_t test_vmerge_vvm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmerge.nxv1i16.i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
 //
-vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16mf2_ta(
@@ -1344,8 +1344,8 @@ vuint16mf4_t test_vmerge_vxm_u16mf4_ta(vbool64_t mask, vuint16mf4_t op1, uint16_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16mf2_ta(
@@ -1353,8 +1353,8 @@ vuint16mf2_t test_vmerge_vvm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, vuint16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmerge.nxv2i16.i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
 //
-vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m1_ta(
@@ -1362,8 +1362,8 @@ vuint16mf2_t test_vmerge_vxm_u16mf2_ta(vbool32_t mask, vuint16mf2_t op1, uint16_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vvm_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m1_ta(
@@ -1371,8 +1371,8 @@ vuint16m1_t test_vmerge_vvm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, vuint16m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmerge.nxv4i16.i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
 //
-vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m1_t test_vmerge_vxm_u16m1_ta(vuint16m1_t op1, uint16_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m2_ta(
@@ -1380,8 +1380,8 @@ vuint16m1_t test_vmerge_vxm_u16m1_ta(vbool16_t mask, vuint16m1_t op1, uint16_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vvm_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m2_ta(
@@ -1389,8 +1389,8 @@ vuint16m2_t test_vmerge_vvm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, vuint16m2_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmerge.nxv8i16.i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
 //
-vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m2_t test_vmerge_vxm_u16m2_ta(vuint16m2_t op1, uint16_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m4_ta(
@@ -1398,8 +1398,8 @@ vuint16m2_t test_vmerge_vxm_u16m2_ta(vbool8_t mask, vuint16m2_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vvm_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m4_ta(
@@ -1407,8 +1407,8 @@ vuint16m4_t test_vmerge_vvm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, vuint16m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmerge.nxv16i16.i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
 //
-vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m4_t test_vmerge_vxm_u16m4_ta(vuint16m4_t op1, uint16_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u16m8_ta(
@@ -1416,8 +1416,8 @@ vuint16m4_t test_vmerge_vxm_u16m4_ta(vbool4_t mask, vuint16m4_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vvm_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u16m8_ta(
@@ -1425,8 +1425,8 @@ vuint16m8_t test_vmerge_vvm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, vuint16m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmerge.nxv32i16.i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
 //
-vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint16m8_t test_vmerge_vxm_u16m8_ta(vuint16m8_t op1, uint16_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32mf2_ta(
@@ -1434,8 +1434,8 @@ vuint16m8_t test_vmerge_vxm_u16m8_ta(vbool2_t mask, vuint16m8_t op1, uint16_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32mf2_ta(
@@ -1443,8 +1443,8 @@ vuint32mf2_t test_vmerge_vvm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, vuint32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmerge.nxv1i32.i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
 //
-vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m1_ta(
@@ -1452,8 +1452,8 @@ vuint32mf2_t test_vmerge_vxm_u32mf2_ta(vbool64_t mask, vuint32mf2_t op1, uint32_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vvm_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m1_ta(
@@ -1461,8 +1461,8 @@ vuint32m1_t test_vmerge_vvm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, vuint32m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmerge.nxv2i32.i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
 //
-vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m1_t test_vmerge_vxm_u32m1_ta(vuint32m1_t op1, uint32_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m2_ta(
@@ -1470,8 +1470,8 @@ vuint32m1_t test_vmerge_vxm_u32m1_ta(vbool32_t mask, vuint32m1_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vvm_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m2_ta(
@@ -1479,8 +1479,8 @@ vuint32m2_t test_vmerge_vvm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, vuint32m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmerge.nxv4i32.i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
 //
-vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m2_t test_vmerge_vxm_u32m2_ta(vuint32m2_t op1, uint32_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m4_ta(
@@ -1488,8 +1488,8 @@ vuint32m2_t test_vmerge_vxm_u32m2_ta(vbool16_t mask, vuint32m2_t op1, uint32_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vvm_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m4_ta(
@@ -1497,8 +1497,8 @@ vuint32m4_t test_vmerge_vvm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, vuint32m4_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmerge.nxv8i32.i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
 //
-vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m4_t test_vmerge_vxm_u32m4_ta(vuint32m4_t op1, uint32_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u32m8_ta(
@@ -1506,8 +1506,8 @@ vuint32m4_t test_vmerge_vxm_u32m4_ta(vbool8_t mask, vuint32m4_t op1, uint32_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vvm_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u32m8_ta(
@@ -1515,8 +1515,8 @@ vuint32m8_t test_vmerge_vvm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, vuint32m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmerge.nxv16i32.i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
 //
-vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint32m8_t test_vmerge_vxm_u32m8_ta(vuint32m8_t op1, uint32_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m1_ta(
@@ -1524,8 +1524,8 @@ vuint32m8_t test_vmerge_vxm_u32m8_ta(vbool4_t mask, vuint32m8_t op1, uint32_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vvm_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m1_ta(
@@ -1533,8 +1533,8 @@ vuint64m1_t test_vmerge_vvm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, vuint64m1_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmerge.nxv1i64.i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
 //
-vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m1_t test_vmerge_vxm_u64m1_ta(vuint64m1_t op1, uint64_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m2_ta(
@@ -1542,8 +1542,8 @@ vuint64m1_t test_vmerge_vxm_u64m1_ta(vbool64_t mask, vuint64m1_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vvm_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m2_ta(
@@ -1551,8 +1551,8 @@ vuint64m2_t test_vmerge_vvm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, vuint64m2_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmerge.nxv2i64.i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
 //
-vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m2_t test_vmerge_vxm_u64m2_ta(vuint64m2_t op1, uint64_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m4_ta(
@@ -1560,8 +1560,8 @@ vuint64m2_t test_vmerge_vxm_u64m2_ta(vbool32_t mask, vuint64m2_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vvm_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m4_ta(
@@ -1569,8 +1569,8 @@ vuint64m4_t test_vmerge_vvm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, vuint64m4_
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmerge.nxv4i64.i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
 //
-vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m4_t test_vmerge_vxm_u64m4_ta(vuint64m4_t op1, uint64_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_u64m8_ta(
@@ -1578,8 +1578,8 @@ vuint64m4_t test_vmerge_vxm_u64m4_ta(vbool16_t mask, vuint64m4_t op1, uint64_t o
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vvm_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vxm_u64m8_ta(
@@ -1587,8 +1587,8 @@ vuint64m8_t test_vmerge_vvm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, vuint64m8_t
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmerge.nxv8i64.i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
 //
-vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vuint64m8_t test_vmerge_vxm_u64m8_ta(vuint64m8_t op1, uint64_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_tu(
@@ -1596,8 +1596,8 @@ vuint64m8_t test_vmerge_vxm_u64m8_ta(vbool8_t mask, vuint64m8_t op1, uint64_t op
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> [[MASKEDOFF:%.*]], <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vfloat16mf4_t maskedoff, vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_tu(
@@ -1605,8 +1605,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_tu(vbool64_t mask, vfloat16mf4_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> [[MASKEDOFF:%.*]], <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vfloat16mf2_t maskedoff, vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_tu(
@@ -1614,8 +1614,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_tu(vbool32_t mask, vfloat16mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> [[MASKEDOFF:%.*]], <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_tu(vfloat16m1_t maskedoff, vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_tu(
@@ -1623,8 +1623,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1_tu(vbool16_t mask, vfloat16m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> [[MASKEDOFF:%.*]], <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_tu(vfloat16m2_t maskedoff, vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_tu(
@@ -1632,8 +1632,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2_tu(vbool8_t mask, vfloat16m2_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> [[MASKEDOFF:%.*]], <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_tu(vfloat16m4_t maskedoff, vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_tu(
@@ -1641,8 +1641,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4_tu(vbool4_t mask, vfloat16m4_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> [[MASKEDOFF:%.*]], <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_tu(vfloat16m8_t maskedoff, vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_tu(
@@ -1650,8 +1650,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8_tu(vbool2_t mask, vfloat16m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> [[MASKEDOFF:%.*]], <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vfloat32mf2_t maskedoff, vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_tu(
@@ -1659,8 +1659,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_tu(vbool64_t mask, vfloat32mf2_t maskedoff,
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> [[MASKEDOFF:%.*]], <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_tu(vfloat32m1_t maskedoff, vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_tu(
@@ -1668,8 +1668,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1_tu(vbool32_t mask, vfloat32m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> [[MASKEDOFF:%.*]], <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_tu(vfloat32m2_t maskedoff, vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_tu(
@@ -1677,8 +1677,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2_tu(vbool16_t mask, vfloat32m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> [[MASKEDOFF:%.*]], <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_tu(vfloat32m4_t maskedoff, vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_tu(
@@ -1686,8 +1686,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4_tu(vbool8_t mask, vfloat32m4_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> [[MASKEDOFF:%.*]], <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_tu(vfloat32m8_t maskedoff, vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_tu(
@@ -1695,8 +1695,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8_tu(vbool4_t mask, vfloat32m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> [[MASKEDOFF:%.*]], <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_tu(vfloat64m1_t maskedoff, vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_tu(
@@ -1704,8 +1704,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1_tu(vbool64_t mask, vfloat64m1_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> [[MASKEDOFF:%.*]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_tu(vfloat64m2_t maskedoff, vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_tu(
@@ -1713,8 +1713,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2_tu(vbool32_t mask, vfloat64m2_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> [[MASKEDOFF:%.*]], <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_tu(vfloat64m4_t maskedoff, vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_tu(
@@ -1722,8 +1722,8 @@ vfloat64m4_t test_vmerge_vvm_f64m4_tu(vbool16_t mask, vfloat64m4_t maskedoff, vf
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> [[MASKEDOFF:%.*]], <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge_tu(mask, maskedoff, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_tu(vfloat64m8_t maskedoff, vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_tu(maskedoff, op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf4_ta(
@@ -1731,8 +1731,8 @@ vfloat64m8_t test_vmerge_vvm_f64m8_tu(vbool8_t mask, vfloat64m8_t maskedoff, vfl
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vmerge.nxv1f16.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[OP1:%.*]], <vscale x 1 x half> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
 //
-vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloat16mf4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vfloat16mf4_t op1, vfloat16mf4_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16mf2_ta(
@@ -1740,8 +1740,8 @@ vfloat16mf4_t test_vmerge_vvm_f16mf4_ta(vbool64_t mask, vfloat16mf4_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vmerge.nxv2f16.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[OP1:%.*]], <vscale x 2 x half> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
 //
-vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloat16mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vfloat16mf2_t op1, vfloat16mf2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m1_ta(
@@ -1749,8 +1749,8 @@ vfloat16mf2_t test_vmerge_vvm_f16mf2_ta(vbool32_t mask, vfloat16mf2_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vmerge.nxv4f16.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[OP1:%.*]], <vscale x 4 x half> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
 //
-vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16m1_t test_vmerge_vvm_f16m1_ta(vfloat16m1_t op1, vfloat16m1_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m2_ta(
@@ -1758,8 +1758,8 @@ vfloat16m1_t test_vmerge_vvm_f16m1_ta(vbool16_t mask, vfloat16m1_t op1, vfloat16
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vmerge.nxv8f16.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[OP1:%.*]], <vscale x 8 x half> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
 //
-vfloat16m2_t test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16m2_t test_vmerge_vvm_f16m2_ta(vfloat16m2_t op1, vfloat16m2_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m4_ta(
@@ -1767,8 +1767,8 @@ vfloat16m2_t test_vmerge_vvm_f16m2_ta(vbool8_t mask, vfloat16m2_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vmerge.nxv16f16.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[OP1:%.*]], <vscale x 16 x half> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
 //
-vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16m4_t test_vmerge_vvm_f16m4_ta(vfloat16m4_t op1, vfloat16m4_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f16m8_ta(
@@ -1776,8 +1776,8 @@ vfloat16m4_t test_vmerge_vvm_f16m4_ta(vbool4_t mask, vfloat16m4_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vmerge.nxv32f16.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[OP1:%.*]], <vscale x 32 x half> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
 //
-vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat16m8_t test_vmerge_vvm_f16m8_ta(vfloat16m8_t op1, vfloat16m8_t op2, vbool2_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32mf2_ta(
@@ -1785,8 +1785,8 @@ vfloat16m8_t test_vmerge_vvm_f16m8_ta(vbool2_t mask, vfloat16m8_t op1, vfloat16m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmerge.nxv1f32.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[OP1:%.*]], <vscale x 1 x float> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
 //
-vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloat32mf2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vfloat32mf2_t op1, vfloat32mf2_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m1_ta(
@@ -1794,8 +1794,8 @@ vfloat32mf2_t test_vmerge_vvm_f32mf2_ta(vbool64_t mask, vfloat32mf2_t op1, vfloa
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmerge.nxv2f32.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[OP1:%.*]], <vscale x 2 x float> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
 //
-vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat32m1_t test_vmerge_vvm_f32m1_ta(vfloat32m1_t op1, vfloat32m1_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m2_ta(
@@ -1803,8 +1803,8 @@ vfloat32m1_t test_vmerge_vvm_f32m1_ta(vbool32_t mask, vfloat32m1_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmerge.nxv4f32.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[OP1:%.*]], <vscale x 4 x float> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
 //
-vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat32m2_t test_vmerge_vvm_f32m2_ta(vfloat32m2_t op1, vfloat32m2_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m4_ta(
@@ -1812,8 +1812,8 @@ vfloat32m2_t test_vmerge_vvm_f32m2_ta(vbool16_t mask, vfloat32m2_t op1, vfloat32
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmerge.nxv8f32.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[OP1:%.*]], <vscale x 8 x float> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
 //
-vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat32m4_t test_vmerge_vvm_f32m4_ta(vfloat32m4_t op1, vfloat32m4_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f32m8_ta(
@@ -1821,8 +1821,8 @@ vfloat32m4_t test_vmerge_vvm_f32m4_ta(vbool8_t mask, vfloat32m4_t op1, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmerge.nxv16f32.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[OP1:%.*]], <vscale x 16 x float> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
 //
-vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat32m8_t test_vmerge_vvm_f32m8_ta(vfloat32m8_t op1, vfloat32m8_t op2, vbool4_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m1_ta(
@@ -1830,8 +1830,8 @@ vfloat32m8_t test_vmerge_vvm_f32m8_ta(vbool4_t mask, vfloat32m8_t op1, vfloat32m
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmerge.nxv1f64.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[OP1:%.*]], <vscale x 1 x double> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
 //
-vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64m1_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat64m1_t test_vmerge_vvm_f64m1_ta(vfloat64m1_t op1, vfloat64m1_t op2, vbool64_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m2_ta(
@@ -1839,8 +1839,8 @@ vfloat64m1_t test_vmerge_vvm_f64m1_ta(vbool64_t mask, vfloat64m1_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmerge.nxv2f64.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
 //
-vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64m2_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat64m2_t test_vmerge_vvm_f64m2_ta(vfloat64m2_t op1, vfloat64m2_t op2, vbool32_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m4_ta(
@@ -1848,8 +1848,8 @@ vfloat64m2_t test_vmerge_vvm_f64m2_ta(vbool32_t mask, vfloat64m2_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmerge.nxv4f64.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[OP1:%.*]], <vscale x 4 x double> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
 //
-vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64m4_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat64m4_t test_vmerge_vvm_f64m4_ta(vfloat64m4_t op1, vfloat64m4_t op2, vbool16_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
 // CHECK-RV64-LABEL: @test_vmerge_vvm_f64m8_ta(
@@ -1857,7 +1857,7 @@ vfloat64m4_t test_vmerge_vvm_f64m4_ta(vbool16_t mask, vfloat64m4_t op1, vfloat64
 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmerge.nxv8f64.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[OP1:%.*]], <vscale x 8 x double> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
 //
-vfloat64m8_t test_vmerge_vvm_f64m8_ta(vbool8_t mask, vfloat64m8_t op1, vfloat64m8_t op2, size_t vl) {
-  return vmerge_ta(mask, op1, op2, vl);
+vfloat64m8_t test_vmerge_vvm_f64m8_ta(vfloat64m8_t op1, vfloat64m8_t op2, vbool8_t mask, size_t vl) {
+  return vmerge_ta(op1, op2, mask, vl);
 }
 
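A minimal caller-side sketch (not part of this commit) of how user code adapts to the operand-order change exercised by the tests above. It assumes an RV64 toolchain with the V extension enabled and the overloaded tail-agnostic intrinsic `vmerge_ta` from <riscv_vector.h>; the helper name `select_lanes` is hypothetical.

  #include <riscv_vector.h>
  #include <stddef.h>

  vfloat32m1_t select_lanes(vbool32_t mask, vfloat32m1_t op1,
                            vfloat32m1_t op2, size_t vl) {
    // Old operand order: vmerge_ta(mask, op1, op2, vl);
    // New operand order: the selector mask follows the data operands,
    // immediately before vl.
    return vmerge_ta(op1, op2, mask, vl);
  }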


        

