[clang] [llvm] [RISCV] Add Zvzip intrinsics (PR #186342)

Pengcheng Wang via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 30 20:52:25 PDT 2026


https://github.com/wangpc-pp updated https://github.com/llvm/llvm-project/pull/186342

>From 0d9e9b634a003fbb9b1f85750aec91259982058e Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 27 Jan 2026 11:49:41 +0000
Subject: [PATCH 1/5] [RISCV] Add Zvzip intrinsics

In the RVV Clang builtins generator, a new prototype descriptor
`d` was added to represent vectors with `2 x LMUL`.

The .ll tests were generated by LLM and I have reviewed them.

And the .c tests were generated by
https://github.com/riscv-non-isa/riscv-rvv-intrinsic-doc/pull/431.
---
 clang/docs/ReleaseNotes.rst                   |    1 +
 clang/include/clang/Basic/riscv_vector.td     |   18 +
 .../clang/Basic/riscv_vector_common.td        |    2 +
 .../clang/Support/RISCVVIntrinsicUtils.h      |    1 +
 clang/lib/Support/RISCVVIntrinsicUtils.cpp    |   10 +-
 .../zvzip/non-policy/non-overloaded/vpaire.c  | 1267 ++++++++
 .../zvzip/non-policy/non-overloaded/vpairo.c  | 1267 ++++++++
 .../zvzip/non-policy/non-overloaded/vunzipe.c |  973 ++++++
 .../zvzip/non-policy/non-overloaded/vunzipo.c |  973 ++++++
 .../zvzip/non-policy/non-overloaded/vzip.c    | 1022 +++++++
 .../zvzip/non-policy/overloaded/vpaire.c      | 1267 ++++++++
 .../zvzip/non-policy/overloaded/vpairo.c      | 1267 ++++++++
 .../zvzip/non-policy/overloaded/vunzipe.c     |  973 ++++++
 .../zvzip/non-policy/overloaded/vunzipo.c     |  973 ++++++
 .../zvzip/non-policy/overloaded/vzip.c        | 1022 +++++++
 .../zvzip/policy/non-overloaded/vpaire.c      | 2723 +++++++++++++++++
 .../zvzip/policy/non-overloaded/vpairo.c      | 2723 +++++++++++++++++
 .../zvzip/policy/non-overloaded/vunzipe.c     | 2090 +++++++++++++
 .../zvzip/policy/non-overloaded/vunzipo.c     | 2090 +++++++++++++
 .../zvzip/policy/non-overloaded/vzip.c        | 2189 +++++++++++++
 .../zvzip/policy/overloaded/vpaire.c          | 2723 +++++++++++++++++
 .../zvzip/policy/overloaded/vpairo.c          | 2723 +++++++++++++++++
 .../zvzip/policy/overloaded/vunzipe.c         | 2090 +++++++++++++
 .../zvzip/policy/overloaded/vunzipo.c         | 2090 +++++++++++++
 .../zvzip/policy/overloaded/vzip.c            | 2189 +++++++++++++
 llvm/include/llvm/IR/IntrinsicsRISCV.td       |   53 +
 llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td  |  116 +
 llvm/test/CodeGen/RISCV/rvv/vpaire.ll         | 1115 +++++++
 llvm/test/CodeGen/RISCV/rvv/vpairo.ll         | 1115 +++++++
 llvm/test/CodeGen/RISCV/rvv/vunzipe.ll        |  823 +++++
 llvm/test/CodeGen/RISCV/rvv/vunzipo.ll        |  823 +++++
 llvm/test/CodeGen/RISCV/rvv/vzip.ll           |  883 ++++++
 32 files changed, 39593 insertions(+), 1 deletion(-)
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpaire.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpairo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipe.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vzip.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpaire.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpairo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipe.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vzip.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpaire.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpairo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipe.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vzip.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpaire.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpairo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipe.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipo.c
 create mode 100644 clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vzip.c
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vpaire.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vpairo.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/vzip.ll

diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index 0f1190510f0f8..f24fda8b7b3e6 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -483,6 +483,7 @@ RISC-V Support
 
 - Tenstorrent Ascalon D8 was renamed to Ascalon X. Use `tt-ascalon-x` with `-mcpu` or `-mtune`.
 - Intrinsics were added for the 'Zvabd` (RISC-V Integer Vector Absolute Difference) extension.
+- Intrinsics were added for the `Zvzip` (Reordering Structured Data in Vector Registers) extension.
 
 CUDA/HIP Language Changes
 ^^^^^^^^^^^^^^^^^^^^^^^^^
diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
index 5e4edbe01f1cd..cc20d7459b242 100644
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -2133,3 +2133,21 @@ let RequiredFeatures = ["zvdot4a8i"] in {
                               ["vx", "v", "vvUvUe"]]>;
   defm vdota4us : RVVVDOTA4QBuiltinSet<[["vx", "v", "vvUvUe"]]>;
 }
+
+// Zvzip
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+  let RequiredFeatures = ["zvzip"] in {
+    // Signed and floating type
+    defm vzip : RVVOutOp0BuiltinSet<"vzip", "csilxfdy", [["vv", "d", "dvv"]]>;
+    defm vunzipe : RVVOutOp0BuiltinSet<"vunzipe", "csilxfdy", [["v", "v", "vd"]]>;
+    defm vunzipo : RVVOutOp0BuiltinSet<"vunzipo", "csilxfdy", [["v", "v", "vd"]]>;
+    defm vpaire : RVVOutBuiltinSet<"vpaire", "csilxfdy", [["vv", "v", "vvv"]]>;
+    defm vpairo : RVVOutBuiltinSet<"vpairo", "csilxfdy", [["vv", "v", "vvv"]]>;
+    // Unsigned
+    defm vzip : RVVOutOp0BuiltinSet<"vzip", "csil", [["vv", "Ud", "UdUvUv"]]>;
+    defm vunzipe : RVVOutOp0BuiltinSet<"vunzipe", "csil", [["v", "Uv", "UvUd"]]>;
+    defm vunzipo : RVVOutOp0BuiltinSet<"vunzipo", "csil", [["v", "Uv", "UvUd"]]>;
+    defm vpaire : RVVOutBuiltinSet<"vpaire", "csil", [["vv", "Uv", "UvUvUv"]]>;
+    defm vpairo : RVVOutBuiltinSet<"vpairo", "csil", [["vv", "Uv", "UvUvUv"]]>;
+  }
+}
diff --git a/clang/include/clang/Basic/riscv_vector_common.td b/clang/include/clang/Basic/riscv_vector_common.td
index 5cf45fcc845da..296c72199ba87 100644
--- a/clang/include/clang/Basic/riscv_vector_common.td
+++ b/clang/include/clang/Basic/riscv_vector_common.td
@@ -60,6 +60,8 @@
 //      element type which is four times as wide as the element type of 'v'
 //   o: computes a vector type identical to what 'v' computes except for the
 //      element type which is eight times as wide as the element type of 'v'
+//   d: computes a vector type identical to what 'v' computes except for the
+//      LMUL which is twice as large as the LMUL of 'v'
 //   m: computes a vector type identical to what 'v' computes except for the
 //      element type which is bool
 //   0: void type, ignores "t"
diff --git a/clang/include/clang/Support/RISCVVIntrinsicUtils.h b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
index 4016cc2f77dec..ec98069432ff8 100644
--- a/clang/include/clang/Support/RISCVVIntrinsicUtils.h
+++ b/clang/include/clang/Support/RISCVVIntrinsicUtils.h
@@ -35,6 +35,7 @@ enum class VectorTypeModifier : uint8_t {
   Widening2XVector,
   Widening4XVector,
   Widening8XVector,
+  DoubleLMULVector,
   MaskVector,
   Log2EEW3,
   Log2EEW4,
diff --git a/clang/lib/Support/RISCVVIntrinsicUtils.cpp b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
index a5430aee6b746..145e65fb1d747 100644
--- a/clang/lib/Support/RISCVVIntrinsicUtils.cpp
+++ b/clang/lib/Support/RISCVVIntrinsicUtils.cpp
@@ -448,6 +448,10 @@ PrototypeDescriptor::parsePrototypeDescriptor(
     PT = BaseTypeModifier::Vector;
     VTM = VectorTypeModifier::Widening2XVector;
     break;
+  case 'd':
+    PT = BaseTypeModifier::Vector;
+    VTM = VectorTypeModifier::DoubleLMULVector;
+    break;
   case 'q':
     PT = BaseTypeModifier::Vector;
     VTM = VectorTypeModifier::Widening4XVector;
@@ -737,6 +741,10 @@ void RVVType::applyModifier(const PrototypeDescriptor &Transformer) {
         ScalarType == ScalarTypeKind::FloatE5M2)
       ScalarType = ScalarTypeKind::BFloat;
     break;
+  case VectorTypeModifier::DoubleLMULVector:
+    LMUL.MulLog2LMUL(1);
+    Scale = LMUL.getScale(ElementBitwidth);
+    break;
   case VectorTypeModifier::Widening4XVector:
     ElementBitwidth *= 4;
     LMUL.MulLog2LMUL(2);
@@ -1219,7 +1227,7 @@ void RVVIntrinsic::updateNamesAndPolicy(bool IsMasked, bool HasPolicy,
 
 SmallVector<PrototypeDescriptor> parsePrototypes(StringRef Prototypes) {
   SmallVector<PrototypeDescriptor> PrototypeDescriptors;
-  const StringRef Primaries("evwqom0ztulf");
+  const StringRef Primaries("evwdqom0ztulf");
   while (!Prototypes.empty()) {
     size_t Idx = 0;
     // Skip over complex prototype because it could contain primitive type
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpaire.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpaire.c
new file mode 100644
index 0000000000000..0f1db364139f8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpaire.c
@@ -0,0 +1,1267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d  \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_f16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_f16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_f64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire_vv_u32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                      vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                      vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
+                                    vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
+                                    vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
+                                    vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
+                                    vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                      vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                    vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                    vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+                                    vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+                                    vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
+                                    vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
+                                    vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
+                                    vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
+                                    vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire_vv_i8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire_vv_i8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire_vv_i8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire_vv_i8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
+                                    vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_i64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_u8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_u8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_u8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_u8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
+                                     vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
+                                   vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
+                                   vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
+                                   vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
+                                   vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
+                                   vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
+                                   vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
+                                   vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m8_m(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpairo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpairo.c
new file mode 100644
index 0000000000000..2f7c767afe885
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vpairo.c
@@ -0,0 +1,1267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d  \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_f16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_f16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_f32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_f64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo_vv_u16mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo_vv_u32mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                      vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                      vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
+                                    vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
+                                    vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
+                                    vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
+                                    vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                      vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                    vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                    vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+                                    vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+                                    vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
+                                    vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
+                                    vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
+                                    vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
+                                    vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo_vv_i8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo_vv_i8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo_vv_i8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo_vv_i8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
+                                    vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_i64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_u8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_u8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_u8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_u8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
+                                     vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
+                                   vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
+                                   vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
+                                   vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
+                                   vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
+                                   vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
+                                   vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
+                                   vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m8_m(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipe.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipe.c
new file mode 100644
index 0000000000000..c80e4e2a8332d
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipe.c
@@ -0,0 +1,973 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4(vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2(vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1(vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2(vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4(vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2(vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1(vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2(vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4(vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1(vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2(vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4(vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8(vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf8(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4(vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2(vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1(vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2(vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4(vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4(vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2(vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// NOTE(review): The CHECK lines in this file are FileCheck directives that
+// appear to be auto-generated (e.g. via utils/update_cc_test_checks.py, per
+// the patch description); they must match compiler output exactly, so
+// regenerate them rather than hand-editing. Each unmasked test below narrows
+// a 2*LMUL source vector to a 1*LMUL result through a vunzipe builtin
+// (presumably the even-element unzip -- confirm against the Zvzip spec); the
+// emitted intrinsic takes a poison passthru, the source vector, and vl.
+vint16m1_t test_vunzipe_v_i16m1(vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2(vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4(vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2(vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1(vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2(vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4(vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1(vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2(vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4(vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8(vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4(vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2(vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1(vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2(vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4(vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4(vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2(vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1(vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2(vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4(vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2(vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1(vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2(vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4(vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1(vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2(vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4(vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4(vs, vl);
+}
+
+// NOTE(review): Masked (_m) variants. Each test passes a vbool*_t mask whose
+// element count matches the narrowed result vector; the emitted .mask
+// intrinsic carries a trailing `i64 3` operand, presumably the tail/mask
+// policy (agnostic/agnostic) -- TODO confirm against the RVV policy encoding.
+// CHECK lines are generated FileCheck directives; regenerate, do not
+// hand-edit.
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_m(vbool64_t vm, vfloat16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_f16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_m(vbool32_t vm, vfloat16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_f16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_m(vbool16_t vm, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_m(vbool8_t vm, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_m(vbool4_t vm, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_m(vbool64_t vm, vfloat32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_f32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_m(vbool32_t vm, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_m(vbool16_t vm, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_m(vbool8_t vm, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_m(vbool64_t vm, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_m(vbool32_t vm, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_m(vbool16_t vm, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_m(vbool64_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf8_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_m(vbool32_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_m(vbool16_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_m(vbool8_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_m(vbool4_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_m(vbool2_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_m(vbool64_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_m(vbool32_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_m(vbool16_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_m(vbool8_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_m(vbool4_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_m(vbool64_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_m(vbool32_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_m(vbool16_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_m(vbool8_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_m(vbool64_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_m(vbool32_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_m(vbool16_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_m(vbool64_t vm, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_m(vbool32_t vm, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_m(vbool16_t vm, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_m(vbool8_t vm, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_m(vbool4_t vm, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_m(vbool2_t vm, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_m(vbool64_t vm, vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_m(vbool32_t vm, vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_m(vbool16_t vm, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_m(vbool8_t vm, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_m(vbool4_t vm, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_m(vbool64_t vm, vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_m(vbool32_t vm, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_m(vbool16_t vm, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_m(vbool8_t vm, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_m(vbool64_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_m(vbool32_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_m(vbool16_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4_m(vm, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipo.c
new file mode 100644
index 0000000000000..d1235cd1700c0
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vunzipo.c
@@ -0,0 +1,973 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4(vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2(vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1(vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2(vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4(vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2(vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1(vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2(vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4(vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1(vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2(vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4(vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8(vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf8(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4(vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2(vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1(vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2(vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4(vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4(vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2(vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1(vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2(vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4(vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2(vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1(vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2(vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4(vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1(vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2(vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4(vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8(vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4(vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2(vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf2(vs, vl);
+}
+
+// Unmasked vunzipo tests for unsigned integer element types. Each wrapper
+// takes a 2*LMUL source vector and returns an LMUL result; the CHECK lines
+// (generated, do not hand-edit) pin the @llvm.riscv.vunzipo.<dst>.<src>.i64
+// call with a poison passthru operand.
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1(vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2(vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4(vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4(vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2(vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1(vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2(vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4(vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2(vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32mf2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1(vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2(vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4(vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m4(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1(vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2(vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4(vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4(vs, vl);
+}
+
+// Masked vunzipo tests for floating-point element types. The CHECK lines
+// (generated, do not hand-edit) pin the @llvm.riscv.vunzipo.mask.* call:
+// poison passthru, the source vector, the vbool mask, VL, and a trailing
+// policy operand of 3 (presumably tail-agnostic/mask-agnostic — confirm
+// against RISCVVType policy encoding).
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_m(vbool64_t vm, vfloat16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_f16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_m(vbool32_t vm, vfloat16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_f16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_m(vbool16_t vm, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_m(vbool8_t vm, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_m(vbool4_t vm, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_m(vbool64_t vm, vfloat32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_f32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_m(vbool32_t vm, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_m(vbool16_t vm, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_m(vbool8_t vm, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_m(vbool64_t vm, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_m(vbool32_t vm, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_m(vbool16_t vm, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m4_m(vm, vs, vl);
+}
+
+// Masked vunzipo tests for signed integer element types. Same pattern as the
+// float group: the generated CHECK lines pin @llvm.riscv.vunzipo.mask.* with
+// poison passthru, mask operand, VL, and policy operand 3. Do not hand-edit
+// the CHECK lines; regenerate with update_cc_test_checks.py.
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_m(vbool64_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf8_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_m(vbool32_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_m(vbool16_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_m(vbool8_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_m(vbool4_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_m(vbool2_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_m(vbool64_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_m(vbool32_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_m(vbool16_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_m(vbool8_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_m(vbool4_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_m(vbool64_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_m(vbool32_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_m(vbool16_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_m(vbool8_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_m(vbool64_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_m(vbool32_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_m(vbool16_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m4_m(vm, vs, vl);
+}
+
+// Masked vunzipo tests for unsigned integer element types. Identical IR shape
+// to the signed group (unsigned and signed share the same vector IR types);
+// the generated CHECK lines pin @llvm.riscv.vunzipo.mask.* with poison
+// passthru, mask, VL, and policy operand 3. Do not hand-edit CHECK lines.
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_m(vbool64_t vm, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_m(vbool32_t vm, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_m(vbool16_t vm, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_m(vbool8_t vm, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_m(vbool4_t vm, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_m(vbool2_t vm, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_m(vbool64_t vm, vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_m(vbool32_t vm, vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_m(vbool16_t vm, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_m(vbool8_t vm, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_m(vbool4_t vm, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_m(vbool64_t vm, vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32mf2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_m(vbool32_t vm, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_m(vbool16_t vm, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_m(vbool8_t vm, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m4_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_m(vbool64_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_m(vbool32_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2_m(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_m(vbool16_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4_m(vm, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vzip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vzip.c
new file mode 100644
index 0000000000000..895d47666ff64
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/non-overloaded/vzip.c
@@ -0,0 +1,1022 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.i64(<vscale x 2 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip_vv_f16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_f16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.i64(<vscale x 8 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.i64(<vscale x 16 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.i64(<vscale x 32 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_f32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.i64(<vscale x 4 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.i64(<vscale x 8 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.i64(<vscale x 16 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.i64(<vscale x 2 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.i64(<vscale x 4 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.i64(<vscale x 8 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u16mf2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m1(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m2(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m4(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m8(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                    vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                  vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_m(vbool16_t vm, vfloat16m1_t vs2,
+                                  vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_m(vbool8_t vm, vfloat16m2_t vs2,
+                                  vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_m(vbool4_t vm, vfloat16m4_t vs2,
+                                  vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                  vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                  vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_m(vbool16_t vm, vfloat32m2_t vs2,
+                                  vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_m(vbool8_t vm, vfloat32m4_t vs2,
+                                  vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_m(vbool64_t vm, vfloat64m1_t vs2,
+                                  vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_m(vbool32_t vm, vfloat64m2_t vs2,
+                                  vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_m(vbool16_t vm, vfloat64m4_t vs2,
+                                  vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                              size_t vl) {
+  return __riscv_vzip_vv_i8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                              size_t vl) {
+  return __riscv_vzip_vv_i8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                              size_t vl) {
+  return __riscv_vzip_vv_i8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                              size_t vl) {
+  return __riscv_vzip_vv_i8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_m(vbool64_t vm, vint16mf4_t vs2,
+                                  vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_i64m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u8mf4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u8mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_u8m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_u8m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_u8m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_u8m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_m(vbool64_t vm, vuint16mf4_t vs2,
+                                   vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16mf2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_m(vbool32_t vm, vuint16mf2_t vs2,
+                                 vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u16m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u16m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u16m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_m(vbool64_t vm, vuint32mf2_t vs2,
+                                 vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m1_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u32m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u32m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u32m8_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u64m2_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u64m4_m(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_u64m8_m(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpaire.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpaire.c
new file mode 100644
index 0000000000000..651e0080485a8
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpaire.c
@@ -0,0 +1,1267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                      vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                      vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
+                                    vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
+                                    vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
+                                    vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
+                                    vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                      vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                    vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                    vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+                                    vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+                                    vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
+                                    vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
+                                    vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
+                                    vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
+                                    vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
+                                    vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
+                                     vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
+                                   vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
+                                   vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
+                                   vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
+                                   vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
+                                   vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
+                                   vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
+                                   vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpairo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpairo.c
new file mode 100644
index 0000000000000..75599aa87a4c7
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vpairo.c
@@ -0,0 +1,1267 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1(vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2(vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4(vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8(vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1(vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2(vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4(vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8(vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1(vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2(vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4(vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8(vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8(vint8m8_t vs2, vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8(vint16m8_t vs2, vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8(vint32m8_t vs2, vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8(vint64m8_t vs2, vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8(vuint8m8_t vs2, vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2(vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8(vuint16m8_t vs2, vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2(vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8(vuint32m8_t vs2, vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8(vuint64m8_t vs2, vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                      vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                      vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_m(vbool16_t vm, vfloat16m1_t vs2,
+                                    vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_m(vbool8_t vm, vfloat16m2_t vs2,
+                                    vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_m(vbool4_t vm, vfloat16m4_t vs2,
+                                    vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> poison, <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_m(vbool2_t vm, vfloat16m8_t vs2,
+                                    vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                      vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_m(vbool32_t vm, vfloat32m1_t vs2,
+                                    vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_m(vbool16_t vm, vfloat32m2_t vs2,
+                                    vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_m(vbool8_t vm, vfloat32m4_t vs2,
+                                    vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> poison, <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_m(vbool4_t vm, vfloat32m8_t vs2,
+                                    vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_m(vbool64_t vm, vfloat64m1_t vs2,
+                                    vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_m(vbool32_t vm, vfloat64m2_t vs2,
+                                    vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_m(vbool16_t vm, vfloat64m4_t vs2,
+                                    vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> poison, <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_m(vbool8_t vm, vfloat64m8_t vs2,
+                                    vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_m(vbool1_t vm, vint8m8_t vs2, vint8m8_t vs1,
+                                size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_m(vbool64_t vm, vint16mf4_t vs2,
+                                    vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_m(vbool32_t vm, vint16mf2_t vs2,
+                                    vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_m(vbool2_t vm, vint16m8_t vs2, vint16m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_m(vbool64_t vm, vint32mf2_t vs2,
+                                    vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_m(vbool4_t vm, vint32m8_t vs2, vint32m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_m(vbool8_t vm, vint64m8_t vs2, vint64m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_m(vbool64_t vm, vuint8mf8_t vs2,
+                                   vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_m(vbool32_t vm, vuint8mf4_t vs2,
+                                   vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_m(vbool16_t vm, vuint8mf2_t vs2,
+                                   vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> poison, <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_m(vbool1_t vm, vuint8m8_t vs2, vuint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_m(vbool64_t vm, vuint16mf4_t vs2,
+                                     vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_m(vbool32_t vm, vuint16mf2_t vs2,
+                                     vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_m(vbool16_t vm, vuint16m1_t vs2,
+                                   vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_m(vbool8_t vm, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_m(vbool4_t vm, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> poison, <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_m(vbool2_t vm, vuint16m8_t vs2,
+                                   vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_m(vbool64_t vm, vuint32mf2_t vs2,
+                                     vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_m(vbool32_t vm, vuint32m1_t vs2,
+                                   vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_m(vbool16_t vm, vuint32m2_t vs2,
+                                   vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_m(vbool8_t vm, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> poison, <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_m(vbool4_t vm, vuint32m8_t vs2,
+                                   vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_m(vbool64_t vm, vuint64m1_t vs2,
+                                   vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_m(vbool32_t vm, vuint64m2_t vs2,
+                                   vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_m(vbool16_t vm, vuint64m4_t vs2,
+                                   vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> poison, <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_m(vbool8_t vm, vuint64m8_t vs2,
+                                   vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipe.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipe.c
new file mode 100644
index 0000000000000..c6ca796db8704
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipe.c
@@ -0,0 +1,973 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4(vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2(vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1(vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2(vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4(vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2(vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1(vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2(vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4(vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1(vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2(vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4(vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8(vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4(vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2(vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1(vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2(vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4(vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4(vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2(vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1(vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2(vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4(vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2(vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1(vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2(vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4(vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1(vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2(vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4(vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8(vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4(vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2(vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1(vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2(vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4(vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4(vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2(vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1(vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2(vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4(vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2(vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1(vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2(vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4(vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1(vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2(vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4(vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_m(vbool64_t vm, vfloat16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_m(vbool32_t vm, vfloat16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_m(vbool16_t vm, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_m(vbool8_t vm, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_m(vbool4_t vm, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_m(vbool64_t vm, vfloat32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_m(vbool32_t vm, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_m(vbool16_t vm, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_m(vbool8_t vm, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_m(vbool64_t vm, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_m(vbool32_t vm, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_m(vbool16_t vm, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_m(vbool64_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_m(vbool32_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_m(vbool16_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_m(vbool8_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_m(vbool4_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_m(vbool2_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_m(vbool64_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_m(vbool32_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_m(vbool16_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_m(vbool8_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_m(vbool4_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_m(vbool64_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_m(vbool32_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_m(vbool16_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_m(vbool8_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_m(vbool64_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_m(vbool32_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_m(vbool16_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_m(vbool64_t vm, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_m(vbool32_t vm, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_m(vbool16_t vm, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_m(vbool8_t vm, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_m(vbool4_t vm, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_m(vbool2_t vm, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_m(vbool64_t vm, vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_m(vbool32_t vm, vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_m(vbool16_t vm, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_m(vbool8_t vm, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_m(vbool4_t vm, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_m(vbool64_t vm, vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_m(vbool32_t vm, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_m(vbool16_t vm, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_m(vbool8_t vm, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_m(vbool64_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_m(vbool32_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_m(vbool16_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe(vm, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipo.c
new file mode 100644
index 0000000000000..e3fca9a6485ce
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vunzipo.c
@@ -0,0 +1,973 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4(vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2(vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1(vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2(vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4(vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2(vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1(vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2(vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4(vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1(vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2(vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4(vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8(vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4(vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2(vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1(vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2(vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4(vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4(vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2(vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1(vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2(vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4(vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2(vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1(vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2(vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4(vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1(vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2(vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4(vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8(vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4(vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2(vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1(vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2(vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4(vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4(vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2(vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1(vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2(vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4(vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2(vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1(vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2(vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4(vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1(vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2(vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4(vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> poison, <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_m(vbool64_t vm, vfloat16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> poison, <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_m(vbool32_t vm, vfloat16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> poison, <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_m(vbool16_t vm, vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> poison, <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_m(vbool8_t vm, vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> poison, <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_m(vbool4_t vm, vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> poison, <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_m(vbool64_t vm, vfloat32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> poison, <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_m(vbool32_t vm, vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> poison, <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_m(vbool16_t vm, vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> poison, <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_m(vbool8_t vm, vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> poison, <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_m(vbool64_t vm, vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> poison, <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_m(vbool32_t vm, vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> poison, <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_m(vbool16_t vm, vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_m(vbool64_t vm, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_m(vbool32_t vm, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_m(vbool16_t vm, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_m(vbool8_t vm, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_m(vbool4_t vm, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_m(vbool2_t vm, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_m(vbool64_t vm, vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_m(vbool32_t vm, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_m(vbool16_t vm, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_m(vbool8_t vm, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_m(vbool4_t vm, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_m(vbool64_t vm, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_m(vbool32_t vm, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_m(vbool16_t vm, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_m(vbool8_t vm, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_m(vbool64_t vm, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_m(vbool32_t vm, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_m(vbool16_t vm, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> poison, <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_m(vbool64_t vm, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> poison, <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_m(vbool32_t vm, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> poison, <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_m(vbool16_t vm, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> poison, <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_m(vbool8_t vm, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> poison, <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_m(vbool4_t vm, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> poison, <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_m(vbool2_t vm, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> poison, <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_m(vbool64_t vm, vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> poison, <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_m(vbool32_t vm, vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> poison, <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_m(vbool16_t vm, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> poison, <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_m(vbool8_t vm, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> poison, <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_m(vbool4_t vm, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> poison, <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_m(vbool64_t vm, vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> poison, <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_m(vbool32_t vm, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> poison, <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_m(vbool16_t vm, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> poison, <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_m(vbool8_t vm, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> poison, <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_m(vbool64_t vm, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> poison, <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_m(vbool32_t vm, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> poison, <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_m(vbool16_t vm, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo(vm, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vzip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vzip.c
new file mode 100644
index 0000000000000..d08189b0ec7b9
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/non-policy/overloaded/vzip.c
@@ -0,0 +1,1022 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.i64(<vscale x 2 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2(vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1(vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.i64(<vscale x 8 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2(vfloat16m1_t vs2, vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.i64(<vscale x 16 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4(vfloat16m2_t vs2, vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.i64(<vscale x 32 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8(vfloat16m4_t vs2, vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1(vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.i64(<vscale x 4 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2(vfloat32m1_t vs2, vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.i64(<vscale x 8 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4(vfloat32m2_t vs2, vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.i64(<vscale x 16 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8(vfloat32m4_t vs2, vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.i64(<vscale x 2 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2(vfloat64m1_t vs2, vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.i64(<vscale x 4 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4(vfloat64m2_t vs2, vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.i64(<vscale x 8 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8(vfloat64m4_t vs2, vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4(vint8mf8_t vs2, vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2(vint8mf4_t vs2, vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1(vint8mf2_t vs2, vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2(vint8m1_t vs2, vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4(vint8m2_t vs2, vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8(vint8m4_t vs2, vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2(vint16mf4_t vs2, vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1(vint16mf2_t vs2, vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2(vint16m1_t vs2, vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4(vint16m2_t vs2, vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8(vint16m4_t vs2, vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1(vint32mf2_t vs2, vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2(vint32m1_t vs2, vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4(vint32m2_t vs2, vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8(vint32m4_t vs2, vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2(vint64m1_t vs2, vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4(vint64m2_t vs2, vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8(vint64m4_t vs2, vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4(vuint8mf8_t vs2, vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2(vuint8mf4_t vs2, vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1(vuint8mf2_t vs2, vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2(vuint8m1_t vs2, vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4(vuint8m2_t vs2, vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8(vuint8m4_t vs2, vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2(vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1(vuint16mf2_t vs2, vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2(vuint16m1_t vs2, vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4(vuint16m2_t vs2, vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8(vuint16m4_t vs2, vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1(vuint32mf2_t vs2, vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2(vuint32m1_t vs2, vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4(vuint32m2_t vs2, vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8(vuint32m4_t vs2, vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2(vuint64m1_t vs2, vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4(vuint64m2_t vs2, vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8(vuint64m4_t vs2, vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip(vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> poison, <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_m(vbool64_t vm, vfloat16mf4_t vs2,
+                                    vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> poison, <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_m(vbool32_t vm, vfloat16mf2_t vs2,
+                                  vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> poison, <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_m(vbool16_t vm, vfloat16m1_t vs2,
+                                  vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> poison, <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_m(vbool8_t vm, vfloat16m2_t vs2,
+                                  vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> poison, <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_m(vbool4_t vm, vfloat16m4_t vs2,
+                                  vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> poison, <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_m(vbool64_t vm, vfloat32mf2_t vs2,
+                                  vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> poison, <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_m(vbool32_t vm, vfloat32m1_t vs2,
+                                  vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> poison, <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_m(vbool16_t vm, vfloat32m2_t vs2,
+                                  vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> poison, <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_m(vbool8_t vm, vfloat32m4_t vs2,
+                                  vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> poison, <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_m(vbool64_t vm, vfloat64m1_t vs2,
+                                  vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> poison, <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_m(vbool32_t vm, vfloat64m2_t vs2,
+                                  vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> poison, <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_m(vbool16_t vm, vfloat64m4_t vs2,
+                                  vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_m(vbool64_t vm, vint8mf8_t vs2, vint8mf8_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_m(vbool32_t vm, vint8mf4_t vs2, vint8mf4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_m(vbool16_t vm, vint8mf2_t vs2, vint8mf2_t vs1,
+                              size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_m(vbool8_t vm, vint8m1_t vs2, vint8m1_t vs1,
+                              size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_m(vbool4_t vm, vint8m2_t vs2, vint8m2_t vs1,
+                              size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_m(vbool2_t vm, vint8m4_t vs2, vint8m4_t vs1,
+                              size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_m(vbool64_t vm, vint16mf4_t vs2,
+                                  vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_m(vbool32_t vm, vint16mf2_t vs2, vint16mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_m(vbool16_t vm, vint16m1_t vs2, vint16m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_m(vbool8_t vm, vint16m2_t vs2, vint16m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_m(vbool4_t vm, vint16m4_t vs2, vint16m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_m(vbool64_t vm, vint32mf2_t vs2, vint32mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_m(vbool32_t vm, vint32m1_t vs2, vint32m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_m(vbool16_t vm, vint32m2_t vs2, vint32m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_m(vbool8_t vm, vint32m4_t vs2, vint32m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_m(vbool64_t vm, vint64m1_t vs2, vint64m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_m(vbool32_t vm, vint64m2_t vs2, vint64m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_m(vbool16_t vm, vint64m4_t vs2, vint64m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> poison, <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_m(vbool64_t vm, vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> poison, <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_m(vbool32_t vm, vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> poison, <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_m(vbool16_t vm, vuint8mf2_t vs2, vuint8mf2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> poison, <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_m(vbool8_t vm, vuint8m1_t vs2, vuint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> poison, <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_m(vbool4_t vm, vuint8m2_t vs2, vuint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_m
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> poison, <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_m(vbool2_t vm, vuint8m4_t vs2, vuint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> poison, <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_m(vbool64_t vm, vuint16mf4_t vs2,
+                                   vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> poison, <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_m(vbool32_t vm, vuint16mf2_t vs2,
+                                 vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> poison, <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_m(vbool16_t vm, vuint16m1_t vs2, vuint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> poison, <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_m(vbool8_t vm, vuint16m2_t vs2, vuint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_m
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> poison, <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_m(vbool4_t vm, vuint16m4_t vs2, vuint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> poison, <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_m(vbool64_t vm, vuint32mf2_t vs2,
+                                 vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> poison, <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_m(vbool32_t vm, vuint32m1_t vs2, vuint32m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> poison, <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_m(vbool16_t vm, vuint32m2_t vs2, vuint32m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_m
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> poison, <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_m(vbool8_t vm, vuint32m4_t vs2, vuint32m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_m
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> poison, <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_m(vbool64_t vm, vuint64m1_t vs2, vuint64m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_m
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> poison, <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_m(vbool32_t vm, vuint64m2_t vs2, vuint64m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_m
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> poison, <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_m(vbool16_t vm, vuint64m4_t vs2, vuint64m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip(vm, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpaire.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpaire.c
new file mode 100644
index 0000000000000..4e08fc4b0a8da
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpaire.c
@@ -0,0 +1,2723 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                       vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                       vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                     vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                     vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                     vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                     vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                       vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                     vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+                                     vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+                                     vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+                                     vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2,
+                                     vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2,
+                                     vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2,
+                                     vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2,
+                                     vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_f64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2,
+                                     vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2,
+                                     vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2,
+                                     vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_f16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_f16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd,
+                                      vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                      vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                      vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, vint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, vuint32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, vuint32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                       vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                       vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                       vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                     vint8mf8_t vs2, vint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                     vint8mf4_t vs2, vint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                     vint8mf2_t vs2, vint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                   vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                   vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                   vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                   vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf4_t vs2, vint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, vint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, vint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, vint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, vint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, vint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, vint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, vint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, vint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd,
+                                     vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                     vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                     vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32mf2_t vs2, vint32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    vuint32m8_t vs2, vuint32m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint64m8_t vs2, vuint64m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpairo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpairo.c
new file mode 100644
index 0000000000000..ccf5121032247
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vpairo.c
@@ -0,0 +1,2723 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                       vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                       vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                     vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                     vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                     vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                     vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                       vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                     vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+                                     vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+                                     vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+                                     vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2,
+                                     vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2,
+                                     vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2,
+                                     vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2,
+                                     vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_f64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2,
+                                     vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2,
+                                     vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2,
+                                     vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_f16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_f16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd,
+                                      vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_f32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                      vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                      vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_i16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, vint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_i32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u8mf8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_u16mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_u32mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, vuint32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, vuint32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u64m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_vv_f16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                       vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_vv_f32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                       vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                       vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                     vint8mf8_t vs2, vint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                     vint8mf4_t vs2, vint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                     vint8mf2_t vs2, vint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                   vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                   vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                   vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                   vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf4_t vs2, vint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_i16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, vint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, vint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, vint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_i32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, vint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, vint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, vint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, vint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, vint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u8mf8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_u16mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_vv_u32mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u64m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd,
+                                     vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_vv_f32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                     vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                     vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32mf2_t vs2, vint32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_vv_i32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u8mf8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_vv_u32mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    vuint32m8_t vs2, vuint32m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u64m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint64m8_t vs2, vuint64m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipe.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipe.c
new file mode 100644
index 0000000000000..b516b276ed9c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipe.c
@@ -0,0 +1,2090 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf2_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_v_f16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_v_f16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tu(vfloat16m2_t vd, vfloat16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tu(vfloat16m4_t vd, vfloat16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_v_f32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tu(vfloat32m2_t vd, vfloat32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tu(vfloat32m4_t vd, vfloat32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tu(vfloat64m2_t vd, vfloat64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tu(vfloat64m4_t vd, vfloat64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_f64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tu(vint8mf8_t vd, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf8_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tu(vint8mf4_t vd, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tu(vint8mf2_t vd, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tu(vint8m1_t vd, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tu(vint8m2_t vd, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tu(vint8m4_t vd, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i8m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tu(vint16mf4_t vd, vint16mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tu(vint16mf2_t vd, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tu(vint16m1_t vd, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tu(vint16m2_t vd, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tu(vint16m4_t vd, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tu(vint32mf2_t vd, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tu(vint32m1_t vd, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tu(vint32m2_t vd, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tu(vint32m4_t vd, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tu(vint64m1_t vd, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tu(vint64m2_t vd, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tu(vint64m4_t vd, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tu(vuint8mf2_t vd, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tu(vuint8m2_t vd, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tu(vuint8m4_t vd, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_u16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tu(vuint16mf2_t vd, vuint16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_u16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tu(vuint16m2_t vd, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tu(vuint16m4_t vd, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_v_u32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tu(vuint32m2_t vd, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tu(vuint32m4_t vd, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tu(vuint64m2_t vd, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tu(vuint64m4_t vd, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i8mf8_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i8mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i8mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_i8m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_i8m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_i8m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_i64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_u8m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_u8m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_u8m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i8mf8_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i8mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i8mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_v_i64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u8m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u8m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u8m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_f64m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8mf8_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i8mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_v_i8m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_v_i8m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_v_i8m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_i32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_v_i64m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf8_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u8mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u8mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_u8m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_u8m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_v_u8m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_v_u32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_v_u64m4_mu(vm, vd, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipo.c
new file mode 100644
index 0000000000000..da073766c47d4
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vunzipo.c
@@ -0,0 +1,2090 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf2_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_v_f16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_v_f16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tu(vfloat16m2_t vd, vfloat16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tu(vfloat16m4_t vd, vfloat16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_v_f32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tu(vfloat32m2_t vd, vfloat32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tu(vfloat32m4_t vd, vfloat32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tu(vfloat64m2_t vd, vfloat64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tu(vfloat64m4_t vd, vfloat64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_f64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tu(vint8mf8_t vd, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf8_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tu(vint8mf4_t vd, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tu(vint8mf2_t vd, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tu(vint8m1_t vd, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tu(vint8m2_t vd, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tu(vint8m4_t vd, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i8m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tu(vint16mf4_t vd, vint16mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tu(vint16mf2_t vd, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tu(vint16m1_t vd, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tu(vint16m2_t vd, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tu(vint16m4_t vd, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tu(vint32mf2_t vd, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tu(vint32m1_t vd, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tu(vint32m2_t vd, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tu(vint32m4_t vd, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tu(vint64m1_t vd, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tu(vint64m2_t vd, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tu(vint64m4_t vd, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tu(vuint8mf2_t vd, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tu(vuint8m2_t vd, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tu(vuint8m4_t vd, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_u16mf4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tu(vuint16mf2_t vd, vuint16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_u16mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tu(vuint16m2_t vd, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tu(vuint16m4_t vd, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_v_u32mf2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tu(vuint32m2_t vd, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tu(vuint32m4_t vd, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tu(vuint64m2_t vd, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tu(vuint64m4_t vd, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i8mf8_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i8mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i8mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_i8m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_i8m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_i8m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_i64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_u8m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_u8m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_u8m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32mf2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i8mf8_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i8mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i8mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_v_i64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u8m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u8m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u8m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32mf2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_f64m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8mf8_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i8mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_v_i8m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_v_i8m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_v_i8m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_i32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_v_i64m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf8_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u8mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u8mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_u8m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_u8m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_v_u8m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u16m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u16m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u16m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32mf2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u32m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_v_u32m4_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m1_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m2_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_v_u64m4_mu(vm, vd, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vzip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vzip.c
new file mode 100644
index 0000000000000..9e500270143f5
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/non-overloaded/vzip.c
@@ -0,0 +1,2189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf4_t vs2,
+                                     vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2,
+                                   vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m1_t vs2,
+                                   vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m2_t vs2,
+                                   vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m4_t vs2,
+                                   vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2,
+                                   vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m1_t vs2,
+                                   vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m2_t vs2,
+                                   vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m4_t vs2,
+                                   vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m1_t vs2,
+                                   vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m2_t vs2,
+                                   vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m4_t vs2,
+                                   vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_f64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tu(vint8mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tu(vint8mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_i8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tu(vint8m2_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_i8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tu(vint8m4_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_i8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tu(vint8m8_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_vv_i8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tu(vint16mf2_t vd, vint16mf4_t vs2,
+                                   vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
+                                 vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tu(vint16m2_t vd, vint16m1_t vs2, vint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i16m2_tu(vd, vs2, vs1, vl);
+}
+
+// --- Review notes (comments only; no functional change) ----------------------
+// The tests below cover __riscv_vzip_vv wrappers.  In every case the
+// destination operand `vd` has twice the LMUL / element count of the two
+// source operands `vs2`/`vs1` (e.g. a <vscale x 16 x i16> result built from
+// <vscale x 8 x i16> sources), and each wrapper simply forwards its arguments
+// to the builtin; the CHECK lines pin the expected @llvm.riscv.vzip.*
+// intrinsic call.  First group: `_tu` suffix variants (tail-undisturbed per
+// the RVV intrinsic policy naming -- the intrinsic takes `vd` as a merge
+// operand and no mask).  NOTE(review): the CHECK lines are tool-generated;
+// if they need to change, regenerate them (update_cc_test_checks.py) rather
+// than editing them by hand.
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tu(vint16m4_t vd, vint16m2_t vs2, vint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tu(vint16m8_t vd, vint16m4_t vs2, vint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
+                                 vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tu(vint32m2_t vd, vint32m1_t vs2, vint32m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tu(vint32m4_t vd, vint32m2_t vs2, vint32m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tu(vint32m8_t vd, vint32m4_t vs2, vint32m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tu(vint64m2_t vd, vint64m1_t vs2, vint64m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tu(vint64m4_t vd, vint64m2_t vs2, vint64m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tu(vint64m8_t vd, vint64m4_t vs2, vint64m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_vv_i64m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_u8m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tu(vuint8m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_u8m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tu(vuint8m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_u8m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tu(vuint8m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_vv_u8m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf4_t vs2,
+                                    vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16mf2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
+                                  vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tu(vuint16m2_t vd, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tu(vuint16m4_t vd, vuint16m2_t vs2,
+                                  vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tu(vuint16m8_t vd, vuint16m4_t vs2,
+                                  vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
+                                  vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m1_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2,
+                                  vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m8_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tu(vuint64m2_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m2_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tu(vuint64m4_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m4_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tu(vuint64m8_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m8_tu(vd, vs2, vs1, vl);
+}
+
+// Second group: `_tum` suffix variants -- masked, with a leading mask operand
+// `vm`; the called intrinsic becomes @llvm.riscv.vzip.mask.* and carries a
+// trailing policy immediate (i64 2).  NOTE(review): the meaning of the policy
+// value 2 follows the RVV intrinsic policy encoding -- confirm against
+// RISCVVIntrinsicUtils if in doubt.
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tum(vbool64_t vm, vfloat16mf2_t vd,
+                                      vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vzip_vv_f16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tum(vbool32_t vm, vfloat16m1_t vd,
+                                    vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tum(vbool16_t vm, vfloat16m2_t vd,
+                                    vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tum(vbool8_t vm, vfloat16m4_t vd,
+                                    vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tum(vbool4_t vm, vfloat16m8_t vd,
+                                    vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tum(vbool64_t vm, vfloat32m1_t vd,
+                                    vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tum(vbool32_t vm, vfloat32m2_t vd,
+                                    vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tum(vbool16_t vm, vfloat32m4_t vd,
+                                    vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tum(vbool8_t vm, vfloat32m8_t vd,
+                                    vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tum(vbool64_t vm, vfloat64m2_t vd,
+                                    vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tum(vbool32_t vm, vfloat64m4_t vd,
+                                    vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tum(vbool16_t vm, vfloat64m8_t vd,
+                                    vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_f64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tum(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tum(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                                vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tum(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                                vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tum(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                                vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tum(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                                vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tum(vbool64_t vm, vint16mf2_t vd,
+                                    vint16mf4_t vs2, vint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_i16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                  vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tum(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tum(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tum(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                  vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tum(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                  vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tum(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                  vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tum(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                  vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tum(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                  vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tum(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                  vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tum(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                  vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tum(vbool64_t vm, vuint8mf4_t vd,
+                                   vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u8mf4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tum(vbool32_t vm, vuint8mf2_t vd,
+                                   vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u8mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                 vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tum(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                 vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tum(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                 vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tum(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                 vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tum(vbool64_t vm, vuint16mf2_t vd,
+                                     vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_u16mf2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
+                                   vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u16m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tum(vbool16_t vm, vuint16m2_t vd,
+                                   vuint16m1_t vs2, vuint16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u16m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tum(vbool8_t vm, vuint16m4_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tum(vbool4_t vm, vuint16m8_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
+                                   vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u32m1_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tum(vbool32_t vm, vuint32m2_t vd,
+                                   vuint32m1_t vs2, vuint32m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u32m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tum(vbool16_t vm, vuint32m4_t vd,
+                                   vuint32m2_t vs2, vuint32m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u32m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tum(vbool8_t vm, vuint32m8_t vd, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tum(vbool64_t vm, vuint64m2_t vd,
+                                   vuint64m1_t vs2, vuint64m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u64m2_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tum(vbool32_t vm, vuint64m4_t vd,
+                                   vuint64m2_t vs2, vuint64m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u64m4_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tum(vbool16_t vm, vuint64m8_t vd,
+                                   vuint64m4_t vs2, vuint64m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_u64m8_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tumu(vbool64_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vzip_vv_f16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tumu(vbool32_t vm, vfloat16m1_t vd,
+                                     vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tumu(vbool16_t vm, vfloat16m2_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tumu(vbool8_t vm, vfloat16m4_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tumu(vbool4_t vm, vfloat16m8_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tumu(vbool64_t vm, vfloat32m1_t vd,
+                                     vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tumu(vbool32_t vm, vfloat32m2_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tumu(vbool16_t vm, vfloat32m4_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tumu(vbool8_t vm, vfloat32m8_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tumu(vbool64_t vm, vfloat64m2_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tumu(vbool32_t vm, vfloat64m4_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tumu(vbool16_t vm, vfloat64m8_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tumu(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tumu(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tumu(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                                 vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tumu(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tumu(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tumu(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tumu(vbool64_t vm, vint16mf2_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_i16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tumu(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                   vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tumu(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tumu(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tumu(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tumu(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                   vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tumu(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tumu(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tumu(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tumu(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tumu(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tumu(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tumu(vbool64_t vm, vuint8mf4_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u8mf4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tumu(vbool32_t vm, vuint8mf2_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u8mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tumu(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                  vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tumu(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tumu(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tumu(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tumu(vbool64_t vm, vuint16mf2_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vzip_vv_u16mf2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tumu(vbool32_t vm, vuint16m1_t vd,
+                                    vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u16m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tumu(vbool16_t vm, vuint16m2_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u16m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tumu(vbool8_t vm, vuint16m4_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u16m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tumu(vbool4_t vm, vuint16m8_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u16m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tumu(vbool64_t vm, vuint32m1_t vd,
+                                    vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u32m1_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tumu(vbool32_t vm, vuint32m2_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u32m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tumu(vbool16_t vm, vuint32m4_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u32m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tumu(vbool8_t vm, vuint32m8_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u32m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tumu(vbool64_t vm, vuint64m2_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u64m2_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tumu(vbool32_t vm, vuint64m4_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u64m4_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tumu(vbool16_t vm, vuint64m8_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u64m8_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_mu(vbool64_t vm, vfloat16mf2_t vd,
+                                     vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_vv_f16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_mu(vbool32_t vm, vfloat16m1_t vd,
+                                   vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_mu(vbool16_t vm, vfloat16m2_t vd,
+                                   vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_mu(vbool8_t vm, vfloat16m4_t vd,
+                                   vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_mu(vbool4_t vm, vfloat16m8_t vd,
+                                   vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_mu(vbool64_t vm, vfloat32m1_t vd,
+                                   vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_mu(vbool32_t vm, vfloat32m2_t vd,
+                                   vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_mu(vbool16_t vm, vfloat32m4_t vd,
+                                   vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_mu(vbool8_t vm, vfloat32m8_t vd,
+                                   vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_mu(vbool64_t vm, vfloat64m2_t vd,
+                                   vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_mu(vbool32_t vm, vfloat64m4_t vd,
+                                   vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_mu(vbool16_t vm, vfloat64m8_t vd,
+                                   vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_f64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_mu(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                 vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_mu(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                 vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_mu(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                               vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_mu(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                               vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_mu(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                               vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_mu(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                               vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_mu(vbool64_t vm, vint16mf2_t vd,
+                                   vint16mf4_t vs2, vint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_vv_i16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_mu(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                 vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_mu(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                 vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_mu(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                 vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_mu(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                 vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_mu(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                 vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_mu(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                 vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_mu(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                 vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_mu(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                 vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_mu(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                 vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_mu(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                 vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_mu(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                 vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_i64m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_mu(vbool64_t vm, vuint8mf4_t vd, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_mu(vbool32_t vm, vuint8mf2_t vd, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_mu(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_mu(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_mu(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_mu(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u8m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_mu(vbool64_t vm, vuint16mf2_t vd,
+                                    vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_vv_u16mf2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_mu(vbool32_t vm, vuint16m1_t vd,
+                                  vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip_vv_u16m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_mu(vbool16_t vm, vuint16m2_t vd, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_mu(vbool8_t vm, vuint16m4_t vd, vuint16m2_t vs2,
+                                  vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_mu(vbool4_t vm, vuint16m8_t vd, vuint16m4_t vs2,
+                                  vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u16m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_mu(vbool64_t vm, vuint32m1_t vd,
+                                  vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip_vv_u32m1_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_mu(vbool32_t vm, vuint32m2_t vd, vuint32m1_t vs2,
+                                  vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_mu(vbool16_t vm, vuint32m4_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_mu(vbool8_t vm, vuint32m8_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u32m8_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_mu(vbool64_t vm, vuint64m2_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m2_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_mu(vbool32_t vm, vuint64m4_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m4_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_mu(vbool16_t vm, vuint64m8_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_vv_u64m8_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpaire.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpaire.c
new file mode 100644
index 0000000000000..68eeda68875c1
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpaire.c
@@ -0,0 +1,2723 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                       vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                       vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                     vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                     vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                     vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                     vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                       vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                     vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+                                     vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+                                     vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+                                     vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2,
+                                     vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2,
+                                     vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2,
+                                     vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2,
+                                     vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2,
+                                     vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2,
+                                     vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2,
+                                     vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd,
+                                      vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                      vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                      vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, vint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, vuint32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, vuint32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpaire_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpaire_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpaire_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpaire_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpaire_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpaire_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                       vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpaire_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpaire_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpaire_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpaire_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpaire_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                       vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpaire_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpaire_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpaire_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpaire_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                       vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                     vint8mf8_t vs2, vint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                     vint8mf4_t vs2, vint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                     vint8mf2_t vs2, vint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                   vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                   vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                   vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                   vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf4_t vs2, vint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, vint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, vint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, vint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, vint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, vint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, vint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, vint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// i64m4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vint64m4_t test_vpaire_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, vint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// i64m8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vint64m8_t test_vpaire_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+// u8mf8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8mf8_t test_vpaire_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+// u8mf4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8mf4_t test_vpaire_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+// u8mf2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8mf2_t test_vpaire_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+// u8m1, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8m1_t test_vpaire_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+// u8m2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8m2_t test_vpaire_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+// u8m4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8m4_t test_vpaire_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+// u8m8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint8m8_t test_vpaire_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+// u16mf4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16mf4_t test_vpaire_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+// u16mf2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16mf2_t test_vpaire_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+// u16m1, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16m1_t test_vpaire_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+// u16m2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16m2_t test_vpaire_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+// u16m4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16m4_t test_vpaire_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+// u16m8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint16m8_t test_vpaire_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+// u32mf2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint32mf2_t test_vpaire_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+// u32m1, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint32m1_t test_vpaire_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+// u32m2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint32m2_t test_vpaire_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+// u32m4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint32m4_t test_vpaire_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+// u32m8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint32m8_t test_vpaire_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+// u64m1, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint64m1_t test_vpaire_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+// u64m2, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint64m2_t test_vpaire_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+// u64m4, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint64m4_t test_vpaire_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+// u64m8, tumu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 0.
+vuint64m8_t test_vpaire_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpaire_vv_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+// f16mf4, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16mf4_t test_vpaire_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpaire_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+// f16mf2, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16mf2_t test_vpaire_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpaire_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+// f16m1, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16m1_t test_vpaire_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpaire_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+// f16m2, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16m2_t test_vpaire_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpaire_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+// f16m4, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16m4_t test_vpaire_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpaire_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+// f16m8, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat16m8_t test_vpaire_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd,
+                                     vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpaire_vv_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+// f32mf2, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat32mf2_t test_vpaire_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpaire_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+// f32m1, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat32m1_t test_vpaire_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpaire_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+// f32m2, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat32m2_t test_vpaire_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpaire_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+// f32m4, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat32m4_t test_vpaire_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpaire_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+// f32m8, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat32m8_t test_vpaire_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                     vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpaire_vv_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+// f64m1, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat64m1_t test_vpaire_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpaire_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+// f64m2, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat64m2_t test_vpaire_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpaire_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+// f64m4, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat64m4_t test_vpaire_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpaire_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+// f64m8, mu: CHECK lines above pin llvm.riscv.vpaire.mask with policy immediate 1.
+vfloat64m8_t test_vpaire_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                     vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpaire_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpaire_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpaire_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpaire_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpaire_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpaire_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpaire_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpaire_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpaire_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpaire_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpaire_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpaire_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpaire_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpaire_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32mf2_t vs2, vint32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpaire_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpaire_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpaire_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpaire_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpaire_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpaire_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpaire_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpaire_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpaire_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpaire_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpaire_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpaire_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpaire_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpaire_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpaire_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpaire_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpaire_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpaire_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpaire_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpaire_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpaire_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpaire_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpaire_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpaire_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpaire_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpaire_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpaire_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpaire_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpaire_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpaire_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpaire_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpaire_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpaire_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpaire_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpaire_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpaire_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpaire_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpaire_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpaire_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpaire_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpaire_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpaire_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpaire_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpaire_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    vuint32m8_t vs2, vuint32m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpaire_vv_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpaire_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpaire_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpaire_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpaire_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpaire_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpaire_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpaire_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint64m8_t vs2, vuint64m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpaire_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpairo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpairo.c
new file mode 100644
index 0000000000000..fa5777f59fe69
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vpairo.c
@@ -0,0 +1,2723 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf4_t vs2,
+                                       vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf2_t vs2,
+                                       vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tu(vfloat16m1_t vd, vfloat16m1_t vs2,
+                                     vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m2_t vs2,
+                                     vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m4_t vs2,
+                                     vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m8_t vs2,
+                                     vfloat16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tu(vfloat32mf2_t vd, vfloat32mf2_t vs2,
+                                       vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tu(vfloat32m1_t vd, vfloat32m1_t vs2,
+                                     vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m2_t vs2,
+                                     vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m4_t vs2,
+                                     vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m8_t vs2,
+                                     vfloat32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tu(vfloat64m1_t vd, vfloat64m1_t vs2,
+                                     vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m2_t vs2,
+                                     vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m4_t vs2,
+                                     vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m8_t vs2,
+                                     vfloat64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tu(vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tu(vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tu(vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tu(vint8m1_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tu(vint8m2_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tu(vint8m4_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tu(vint8m8_t vd, vint8m8_t vs2, vint8m8_t vs1,
+                                 size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tu(vint16mf4_t vd, vint16mf4_t vs2,
+                                     vint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tu(vint16mf2_t vd, vint16mf2_t vs2,
+                                     vint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tu(vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tu(vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tu(vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tu(vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tu(vint32mf2_t vd, vint32mf2_t vs2,
+                                     vint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tu(vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tu(vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tu(vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tu(vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tu(vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tu(vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tu(vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tu(vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tu(vuint8mf8_t vd, vuint8mf8_t vs2,
+                                    vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf4_t vs2,
+                                    vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf2_t vs2,
+                                    vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tu(vuint8m1_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tu(vuint8m2_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tu(vuint8m4_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tu(vuint8m8_t vd, vuint8m8_t vs2, vuint8m8_t vs1,
+                                  size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tu(vuint16mf4_t vd, vuint16mf4_t vs2,
+                                      vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf2_t vs2,
+                                      vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tu(vuint16m1_t vd, vuint16m1_t vs2,
+                                    vuint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tu(vuint16m2_t vd, vuint16m2_t vs2,
+                                    vuint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tu(vuint16m4_t vd, vuint16m4_t vs2,
+                                    vuint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tu(vuint16m8_t vd, vuint16m8_t vs2,
+                                    vuint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tu(vuint32mf2_t vd, vuint32mf2_t vs2,
+                                      vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tu(vuint32m1_t vd, vuint32m1_t vs2,
+                                    vuint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tu(vuint32m2_t vd, vuint32m2_t vs2,
+                                    vuint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tu(vuint32m4_t vd, vuint32m4_t vs2,
+                                    vuint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tu(vuint32m8_t vd, vuint32m8_t vs2,
+                                    vuint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tu(vuint64m1_t vd, vuint64m1_t vs2,
+                                    vuint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tu(vuint64m2_t vd, vuint64m2_t vs2,
+                                    vuint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tu(vuint64m4_t vd, vuint64m4_t vs2,
+                                    vuint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tu(vuint64m8_t vd, vuint64m8_t vs2,
+                                    vuint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tum(vbool2_t vm, vfloat16m8_t vd,
+                                      vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tum(vbool4_t vm, vfloat32m8_t vd,
+                                      vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tum(vbool8_t vm, vfloat64m8_t vd,
+                                      vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                    vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                    vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                    vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                  vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                  vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                  vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tum(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                  vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf4_t vs2, vint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16mf2_t vs2, vint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                    vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                    vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                    vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tum(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                    vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32mf2_t vs2, vint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                    vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                    vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                    vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tum(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                    vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                    vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                    vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                    vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tum(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                    vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                   vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                   vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                   vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tum(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                   vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m1_t vs2, vuint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m2_t vs2, vuint16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m4_t vs2, vuint16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tum(vbool2_t vm, vuint16m8_t vd,
+                                     vuint16m8_t vs2, vuint16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m1_t vs2, vuint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m2_t vs2, vuint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m4_t vs2, vuint32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tum(vbool4_t vm, vuint32m8_t vd,
+                                     vuint32m8_t vs2, vuint32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m1_t vs2, vuint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m2_t vs2, vuint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m4_t vs2, vuint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tum(vbool8_t vm, vuint64m8_t vd,
+                                     vuint64m8_t vs2, vuint64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_tumu(vbool2_t vm, vfloat16m8_t vd,
+                                       vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                         size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_tumu(vbool4_t vm, vfloat32m8_t vd,
+                                       vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_tumu(vbool8_t vm, vfloat64m8_t vd,
+                                       vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd,
+                                     vint8mf8_t vs2, vint8mf8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd,
+                                     vint8mf4_t vs2, vint8mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd,
+                                     vint8mf2_t vs2, vint8mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                   vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                   vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                   vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_tumu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                   vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf4_t vs2, vint16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16mf2_t vs2, vint16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_tumu(vbool16_t vm, vint16m1_t vd,
+                                     vint16m1_t vs2, vint16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                     vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                     vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_tumu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                     vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32mf2_t vs2, vint32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_tumu(vbool32_t vm, vint32m1_t vd,
+                                     vint32m1_t vs2, vint32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_tumu(vbool16_t vm, vint32m2_t vd,
+                                     vint32m2_t vs2, vint32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                     vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_tumu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                     vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_tumu(vbool64_t vm, vint64m1_t vd,
+                                     vint64m1_t vs2, vint64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_tumu(vbool32_t vm, vint64m2_t vd,
+                                     vint64m2_t vs2, vint64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_tumu(vbool16_t vm, vint64m4_t vd,
+                                     vint64m4_t vs2, vint64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_tumu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                     vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                    vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                    vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                    vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_tumu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                    vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m1_t vs2, vuint16m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m2_t vs2, vuint16m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m4_t vs2, vuint16m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_tumu(vbool2_t vm, vuint16m8_t vd,
+                                      vuint16m8_t vs2, vuint16m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                        size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m1_t vs2, vuint32m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m2_t vs2, vuint32m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m4_t vs2, vuint32m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_tumu(vbool4_t vm, vuint32m8_t vd,
+                                      vuint32m8_t vs2, vuint32m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m1_t vs2, vuint64m1_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m2_t vs2, vuint64m2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m4_t vs2, vuint64m4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_tumu(vbool8_t vm, vuint64m8_t vd,
+                                      vuint64m8_t vs2, vuint64m8_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vpairo_vv_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16.i64(<vscale x 1 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vpairo_vv_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vpairo_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16.i64(<vscale x 2 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vpairo_vv_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vpairo_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16.i64(<vscale x 4 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vpairo_vv_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vpairo_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16.i64(<vscale x 8 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vpairo_vv_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vpairo_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16.i64(<vscale x 16 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vpairo_vv_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vpairo_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 32 x half> [[VS2:%.*]], <vscale x 32 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16.i64(<vscale x 32 x half> [[VD]], <vscale x 32 x half> [[VS2]], <vscale x 32 x half> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vpairo_vv_f16m8_mu(vbool2_t vm, vfloat16m8_t vd,
+                                     vfloat16m8_t vs2, vfloat16m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vpairo_vv_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32.i64(<vscale x 1 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vpairo_vv_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                       size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vpairo_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32.i64(<vscale x 2 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vpairo_vv_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vpairo_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32.i64(<vscale x 4 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vpairo_vv_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vpairo_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32.i64(<vscale x 8 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vpairo_vv_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vpairo_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 16 x float> [[VS2:%.*]], <vscale x 16 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32.i64(<vscale x 16 x float> [[VD]], <vscale x 16 x float> [[VS2]], <vscale x 16 x float> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vpairo_vv_f32m8_mu(vbool4_t vm, vfloat32m8_t vd,
+                                     vfloat32m8_t vs2, vfloat32m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vpairo_vv_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64.i64(<vscale x 1 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vpairo_vv_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vpairo_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64.i64(<vscale x 2 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vpairo_vv_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vpairo_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64.i64(<vscale x 4 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vpairo_vv_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vpairo_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 8 x double> [[VS2:%.*]], <vscale x 8 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64.i64(<vscale x 8 x double> [[VD]], <vscale x 8 x double> [[VS2]], <vscale x 8 x double> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vpairo_vv_f64m8_mu(vbool8_t vm, vfloat64m8_t vd,
+                                     vfloat64m8_t vs2, vfloat64m8_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vpairo_vv_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vpairo_vv_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vpairo_vv_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8mf2_t vs2,
+                                   vint8mf2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vpairo_vv_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vpairo_vv_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vpairo_vv_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vpairo_vv_i8m8_mu(vbool1_t vm, vint8m8_t vd, vint8m8_t vs2,
+                                 vint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vpairo_vv_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vpairo_vv_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16mf2_t vs2, vint16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vpairo_vv_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vpairo_vv_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vpairo_vv_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vpairo_vv_i16m8_mu(vbool2_t vm, vint16m8_t vd, vint16m8_t vs2,
+                                   vint16m8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vpairo_vv_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32mf2_t vs2, vint32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vpairo_vv_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vpairo_vv_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vpairo_vv_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vpairo_vv_i32m8_mu(vbool4_t vm, vint32m8_t vd, vint32m8_t vs2,
+                                   vint32m8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vpairo_vv_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vpairo_vv_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vpairo_vv_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vpairo_vv_i64m8_mu(vbool8_t vm, vint64m8_t vd, vint64m8_t vs2,
+                                   vint64m8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vpairo_vv_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vpairo_vv_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vpairo_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vpairo_vv_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vpairo_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vpairo_vv_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd,
+                                    vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vpairo_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vpairo_vv_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vpairo_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vpairo_vv_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vpairo_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vpairo_vv_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vpairo_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 64 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS2:%.*]], <vscale x 64 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 64 x i8> [[VS2]], <vscale x 64 x i8> [[VS1]], <vscale x 64 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vpairo_vv_u8m8_mu(vbool1_t vm, vuint8m8_t vd, vuint8m8_t vs2,
+                                  vuint8m8_t vs1, size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vpairo_vv_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vpairo_vv_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vpairo_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vpairo_vv_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vpairo_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vpairo_vv_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vpairo_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vpairo_vv_u16m2_mu(vbool8_t vm, vuint16m2_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vpairo_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vpairo_vv_u16m4_mu(vbool4_t vm, vuint16m4_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vpairo_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS2:%.*]], <vscale x 32 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 32 x i16> [[VS2]], <vscale x 32 x i16> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vpairo_vv_u16m8_mu(vbool2_t vm, vuint16m8_t vd,
+                                    vuint16m8_t vs2, vuint16m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vpairo_vv_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vpairo_vv_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                      size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vpairo_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vpairo_vv_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vpairo_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vpairo_vv_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vpairo_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vpairo_vv_u32m4_mu(vbool8_t vm, vuint32m4_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vpairo_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], <vscale x 16 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 16 x i32> [[VS2]], <vscale x 16 x i32> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vpairo_vv_u32m8_mu(vbool4_t vm, vuint32m8_t vd,
+                                    vuint32m8_t vs2, vuint32m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vpairo_vv_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vpairo_vv_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vpairo_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vpairo_vv_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vpairo_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vpairo_vv_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vpairo_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS2:%.*]], <vscale x 8 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 8 x i64> [[VS2]], <vscale x 8 x i64> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vpairo_vv_u64m8_mu(vbool8_t vm, vuint64m8_t vd,
+                                    vuint64m8_t vs2, vuint64m8_t vs1,
+                                    size_t vl) {
+  return __riscv_vpairo_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipe.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipe.c
new file mode 100644
index 0000000000000..3b2b68028bd77
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipe.c
@@ -0,0 +1,2090 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf2_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tu(vfloat16m2_t vd, vfloat16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tu(vfloat16m4_t vd, vfloat16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tu(vfloat32m2_t vd, vfloat32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tu(vfloat32m4_t vd, vfloat32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tu(vfloat64m2_t vd, vfloat64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tu(vfloat64m4_t vd, vfloat64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tu(vint8mf8_t vd, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tu(vint8mf4_t vd, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tu(vint8mf2_t vd, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tu(vint8m1_t vd, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tu(vint8m2_t vd, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tu(vint8m4_t vd, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tu(vint16mf4_t vd, vint16mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tu(vint16mf2_t vd, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tu(vint16m1_t vd, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tu(vint16m2_t vd, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tu(vint16m4_t vd, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tu(vint32mf2_t vd, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tu(vint32m1_t vd, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tu(vint32m2_t vd, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tu(vint32m4_t vd, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tu(vint64m1_t vd, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tu(vint64m2_t vd, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tu(vint64m4_t vd, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tu(vuint8mf2_t vd, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tu(vuint8m2_t vd, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tu(vuint8m4_t vd, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tu(vuint16mf2_t vd, vuint16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tu(vuint16m2_t vd, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tu(vuint16m4_t vd, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tu(vuint32m2_t vd, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tu(vuint32m4_t vd, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tu(vuint64m2_t vd, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tu(vuint64m4_t vd, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipe_v_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipe_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipe_v_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipe_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipe_v_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipe_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipe_v_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipe_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipe_v_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipe_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipe_v_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipe_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipe_v_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipe_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipe_v_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipe_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipe_v_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipe_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipe_v_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipe_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipe_v_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipe_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipe_v_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipe_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipe_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipe_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipe_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipe_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipe_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipe_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipe_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipe_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipe_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipe_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipe_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipe_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipe_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipe_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipe_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipe_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipe_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipe_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipe_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipe_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipe_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipe_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipe_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipe_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipe_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipe_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipe_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipe_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipe_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipe_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipe_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipe_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipe_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipe_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipe_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipe_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipe_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipe_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipe_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipe_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipe_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipe_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipe_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipe_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipe_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipe_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipe_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipe_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipe_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipe_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipe_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipe_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipe_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipe_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipe_mu(vm, vd, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipo.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipo.c
new file mode 100644
index 0000000000000..442bffaf27c64
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vunzipo.c
@@ -0,0 +1,2090 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tu(vfloat16mf4_t vd, vfloat16mf2_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tu(vfloat16mf2_t vd, vfloat16m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tu(vfloat16m1_t vd, vfloat16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tu(vfloat16m2_t vd, vfloat16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tu(vfloat16m4_t vd, vfloat16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tu(vfloat32mf2_t vd, vfloat32m1_t vs,
+                                       size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tu(vfloat32m1_t vd, vfloat32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tu(vfloat32m2_t vd, vfloat32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tu(vfloat32m4_t vd, vfloat32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tu(vfloat64m1_t vd, vfloat64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tu(vfloat64m2_t vd, vfloat64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tu(vfloat64m4_t vd, vfloat64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tu(vint8mf8_t vd, vint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tu(vint8mf4_t vd, vint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tu(vint8mf2_t vd, vint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tu(vint8m1_t vd, vint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tu(vint8m2_t vd, vint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tu(vint8m4_t vd, vint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tu(vint16mf4_t vd, vint16mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tu(vint16mf2_t vd, vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tu(vint16m1_t vd, vint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tu(vint16m2_t vd, vint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tu(vint16m4_t vd, vint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tu(vint32mf2_t vd, vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tu(vint32m1_t vd, vint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tu(vint32m2_t vd, vint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tu(vint32m4_t vd, vint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tu(vint64m1_t vd, vint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tu(vint64m2_t vd, vint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tu(vint64m4_t vd, vint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tu(vuint8mf8_t vd, vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tu(vuint8mf4_t vd, vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tu(vuint8mf2_t vd, vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tu(vuint8m1_t vd, vuint8m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tu(vuint8m2_t vd, vuint8m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tu(vuint8m4_t vd, vuint8m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tu(vuint16mf4_t vd, vuint16mf2_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tu(vuint16mf2_t vd, vuint16m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tu(vuint16m1_t vd, vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tu(vuint16m2_t vd, vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tu(vuint16m4_t vd, vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tu(vuint32mf2_t vd, vuint32m1_t vs,
+                                      size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tu(vuint32m1_t vd, vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tu(vuint32m2_t vd, vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tu(vuint32m4_t vd, vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tu
+// CHECK-RV64-SAME: (<vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tu(vuint64m1_t vd, vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tu(vuint64m2_t vd, vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tu(vuint64m4_t vd, vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tu(vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tum(vbool64_t vm, vfloat16mf4_t vd,
+                                        vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tum(vbool32_t vm, vfloat16mf2_t vd,
+                                        vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tum(vbool16_t vm, vfloat16m1_t vd,
+                                      vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tum(vbool8_t vm, vfloat16m2_t vd,
+                                      vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tum(vbool4_t vm, vfloat16m4_t vd,
+                                      vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tum(vbool64_t vm, vfloat32mf2_t vd,
+                                        vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tum(vbool32_t vm, vfloat32m1_t vd,
+                                      vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tum(vbool16_t vm, vfloat32m2_t vd,
+                                      vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tum(vbool8_t vm, vfloat32m4_t vd,
+                                      vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tum(vbool64_t vm, vfloat64m1_t vd,
+                                      vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tum(vbool32_t vm, vfloat64m2_t vd,
+                                      vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tum(vbool16_t vm, vfloat64m4_t vd,
+                                      vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tum(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tum(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tum(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tum(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tum(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tum(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tum(vbool64_t vm, vint16mf4_t vd,
+                                      vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tum(vbool32_t vm, vint16mf2_t vd,
+                                      vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tum(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tum(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tum(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tum(vbool64_t vm, vint32mf2_t vd,
+                                      vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tum(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tum(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tum(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tum(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tum(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tum(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tum(vbool64_t vm, vuint8mf8_t vd,
+                                     vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tum(vbool32_t vm, vuint8mf4_t vd,
+                                     vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tum(vbool16_t vm, vuint8mf2_t vd,
+                                     vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tum(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tum(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tum(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tum(vbool64_t vm, vuint16mf4_t vd,
+                                       vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tum(vbool32_t vm, vuint16mf2_t vd,
+                                       vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tum(vbool16_t vm, vuint16m1_t vd,
+                                     vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tum(vbool8_t vm, vuint16m2_t vd,
+                                     vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tum(vbool4_t vm, vuint16m4_t vd,
+                                     vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tum(vbool64_t vm, vuint32mf2_t vd,
+                                       vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tum(vbool32_t vm, vuint32m1_t vd,
+                                     vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tum(vbool16_t vm, vuint32m2_t vd,
+                                     vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tum(vbool8_t vm, vuint32m4_t vd,
+                                     vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tum(vbool64_t vm, vuint64m1_t vd,
+                                     vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tum(vbool32_t vm, vuint64m2_t vd,
+                                     vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tum(vbool16_t vm, vuint64m4_t vd,
+                                     vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tum(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_tumu(vbool64_t vm, vfloat16mf4_t vd,
+                                         vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_tumu(vbool32_t vm, vfloat16mf2_t vd,
+                                         vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_tumu(vbool16_t vm, vfloat16m1_t vd,
+                                       vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_tumu(vbool8_t vm, vfloat16m2_t vd,
+                                       vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_tumu(vbool4_t vm, vfloat16m4_t vd,
+                                       vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_tumu(vbool64_t vm, vfloat32mf2_t vd,
+                                         vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_tumu(vbool32_t vm, vfloat32m1_t vd,
+                                       vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_tumu(vbool16_t vm, vfloat32m2_t vd,
+                                       vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_tumu(vbool8_t vm, vfloat32m4_t vd,
+                                       vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_tumu(vbool64_t vm, vfloat64m1_t vd,
+                                       vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_tumu(vbool32_t vm, vfloat64m2_t vd,
+                                       vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_tumu(vbool16_t vm, vfloat64m4_t vd,
+                                       vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_tumu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_tumu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_tumu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_tumu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_tumu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_tumu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_tumu(vbool64_t vm, vint16mf4_t vd,
+                                       vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_tumu(vbool32_t vm, vint16mf2_t vd,
+                                       vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_tumu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_tumu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_tumu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_tumu(vbool64_t vm, vint32mf2_t vd,
+                                       vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_tumu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_tumu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_tumu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_tumu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_tumu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_tumu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                     size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_tumu(vbool64_t vm, vuint8mf8_t vd,
+                                      vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_tumu(vbool32_t vm, vuint8mf4_t vd,
+                                      vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_tumu(vbool16_t vm, vuint8mf2_t vd,
+                                      vuint8m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_tumu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_tumu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_tumu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_tumu(vbool64_t vm, vuint16mf4_t vd,
+                                        vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_tumu(vbool32_t vm, vuint16mf2_t vd,
+                                        vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_tumu(vbool16_t vm, vuint16m1_t vd,
+                                      vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_tumu(vbool8_t vm, vuint16m2_t vd,
+                                      vuint16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_tumu(vbool4_t vm, vuint16m4_t vd,
+                                      vuint16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_tumu(vbool64_t vm, vuint32mf2_t vd,
+                                        vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_tumu(vbool32_t vm, vuint32m1_t vd,
+                                      vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_tumu(vbool16_t vm, vuint32m2_t vd,
+                                      vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_tumu(vbool8_t vm, vuint32m4_t vd,
+                                      vuint32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_tumu(vbool64_t vm, vuint64m1_t vd,
+                                      vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_tumu(vbool32_t vm, vuint64m2_t vd,
+                                      vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_tumu(vbool16_t vm, vuint64m4_t vd,
+                                      vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_tumu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x half> @test_vunzipo_v_f16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x half> [[VD:%.*]], <vscale x 2 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16.i64(<vscale x 1 x half> [[VD]], <vscale x 2 x half> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x half> [[TMP0]]
+//
+vfloat16mf4_t test_vunzipo_v_f16mf4_mu(vbool64_t vm, vfloat16mf4_t vd,
+                                       vfloat16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vunzipo_v_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 4 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16.i64(<vscale x 2 x half> [[VD]], <vscale x 4 x half> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vunzipo_v_f16mf2_mu(vbool32_t vm, vfloat16mf2_t vd,
+                                       vfloat16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vunzipo_v_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 8 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16.i64(<vscale x 4 x half> [[VD]], <vscale x 8 x half> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vunzipo_v_f16m1_mu(vbool16_t vm, vfloat16m1_t vd,
+                                     vfloat16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vunzipo_v_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 16 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16.i64(<vscale x 8 x half> [[VD]], <vscale x 16 x half> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vunzipo_v_f16m2_mu(vbool8_t vm, vfloat16m2_t vd,
+                                     vfloat16m4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vunzipo_v_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 32 x half> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16.i64(<vscale x 16 x half> [[VD]], <vscale x 32 x half> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vunzipo_v_f16m4_mu(vbool4_t vm, vfloat16m4_t vd,
+                                     vfloat16m8_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x float> @test_vunzipo_v_f32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x float> [[VD:%.*]], <vscale x 2 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32.i64(<vscale x 1 x float> [[VD]], <vscale x 2 x float> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
+//
+vfloat32mf2_t test_vunzipo_v_f32mf2_mu(vbool64_t vm, vfloat32mf2_t vd,
+                                       vfloat32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vunzipo_v_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 4 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32.i64(<vscale x 2 x float> [[VD]], <vscale x 4 x float> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vunzipo_v_f32m1_mu(vbool32_t vm, vfloat32m1_t vd,
+                                     vfloat32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vunzipo_v_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 8 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32.i64(<vscale x 4 x float> [[VD]], <vscale x 8 x float> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vunzipo_v_f32m2_mu(vbool16_t vm, vfloat32m2_t vd,
+                                     vfloat32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vunzipo_v_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 16 x float> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32.i64(<vscale x 8 x float> [[VD]], <vscale x 16 x float> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vunzipo_v_f32m4_mu(vbool8_t vm, vfloat32m4_t vd,
+                                     vfloat32m8_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x double> @test_vunzipo_v_f64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x double> [[VD:%.*]], <vscale x 2 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64.i64(<vscale x 1 x double> [[VD]], <vscale x 2 x double> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
+//
+vfloat64m1_t test_vunzipo_v_f64m1_mu(vbool64_t vm, vfloat64m1_t vd,
+                                     vfloat64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vunzipo_v_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 4 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64.i64(<vscale x 2 x double> [[VD]], <vscale x 4 x double> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vunzipo_v_f64m2_mu(vbool32_t vm, vfloat64m2_t vd,
+                                     vfloat64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vunzipo_v_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 8 x double> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64.i64(<vscale x 4 x double> [[VD]], <vscale x 8 x double> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vunzipo_v_f64m4_mu(vbool16_t vm, vfloat64m4_t vd,
+                                     vfloat64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_i8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vunzipo_v_i8mf8_mu(vbool64_t vm, vint8mf8_t vd, vint8mf4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vunzipo_v_i8mf4_mu(vbool32_t vm, vint8mf4_t vd, vint8mf2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vunzipo_v_i8mf2_mu(vbool16_t vm, vint8mf2_t vd, vint8m1_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vunzipo_v_i8m1_mu(vbool8_t vm, vint8m1_t vd, vint8m2_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vunzipo_v_i8m2_mu(vbool4_t vm, vint8m2_t vd, vint8m4_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vunzipo_v_i8m4_mu(vbool2_t vm, vint8m4_t vd, vint8m8_t vs,
+                                 size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_i16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vunzipo_v_i16mf4_mu(vbool64_t vm, vint16mf4_t vd,
+                                     vint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vunzipo_v_i16mf2_mu(vbool32_t vm, vint16mf2_t vd,
+                                     vint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vunzipo_v_i16m1_mu(vbool16_t vm, vint16m1_t vd, vint16m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vunzipo_v_i16m2_mu(vbool8_t vm, vint16m2_t vd, vint16m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vunzipo_v_i16m4_mu(vbool4_t vm, vint16m4_t vd, vint16m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_i32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vunzipo_v_i32mf2_mu(vbool64_t vm, vint32mf2_t vd,
+                                     vint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vunzipo_v_i32m1_mu(vbool32_t vm, vint32m1_t vd, vint32m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vunzipo_v_i32m2_mu(vbool16_t vm, vint32m2_t vd, vint32m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vunzipo_v_i32m4_mu(vbool8_t vm, vint32m4_t vd, vint32m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_i64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vunzipo_v_i64m1_mu(vbool64_t vm, vint64m1_t vd, vint64m2_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vunzipo_v_i64m2_mu(vbool32_t vm, vint64m2_t vd, vint64m4_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vunzipo_v_i64m4_mu(vbool16_t vm, vint64m4_t vd, vint64m8_t vs,
+                                   size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i8> @test_vunzipo_v_u8mf8_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8.i64(<vscale x 1 x i8> [[VD]], <vscale x 2 x i8> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vunzipo_v_u8mf8_mu(vbool64_t vm, vuint8mf8_t vd,
+                                    vuint8mf4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vunzipo_v_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 4 x i8> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vunzipo_v_u8mf4_mu(vbool32_t vm, vuint8mf4_t vd,
+                                    vuint8mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vunzipo_v_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 8 x i8> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vunzipo_v_u8mf2_mu(vbool16_t vm, vuint8mf2_t vd, vuint8m1_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vunzipo_v_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 16 x i8> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vunzipo_v_u8m1_mu(vbool8_t vm, vuint8m1_t vd, vuint8m2_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vunzipo_v_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 32 x i8> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vunzipo_v_u8m2_mu(vbool4_t vm, vuint8m2_t vd, vuint8m4_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vunzipo_v_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 64 x i8> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 64 x i8> [[VS]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vunzipo_v_u8m4_mu(vbool2_t vm, vuint8m4_t vd, vuint8m8_t vs,
+                                  size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i16> @test_vunzipo_v_u16mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16.i64(<vscale x 1 x i16> [[VD]], <vscale x 2 x i16> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vunzipo_v_u16mf4_mu(vbool64_t vm, vuint16mf4_t vd,
+                                      vuint16mf2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vunzipo_v_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 4 x i16> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vunzipo_v_u16mf2_mu(vbool32_t vm, vuint16mf2_t vd,
+                                      vuint16m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vunzipo_v_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 8 x i16> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vunzipo_v_u16m1_mu(vbool16_t vm, vuint16m1_t vd,
+                                    vuint16m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vunzipo_v_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 16 x i16> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vunzipo_v_u16m2_mu(vbool8_t vm, vuint16m2_t vd, vuint16m4_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vunzipo_v_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 32 x i16> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 32 x i16> [[VS]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vunzipo_v_u16m4_mu(vbool4_t vm, vuint16m4_t vd, vuint16m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i32> @test_vunzipo_v_u32mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32.i64(<vscale x 1 x i32> [[VD]], <vscale x 2 x i32> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vunzipo_v_u32mf2_mu(vbool64_t vm, vuint32mf2_t vd,
+                                      vuint32m1_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vunzipo_v_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 4 x i32> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vunzipo_v_u32m1_mu(vbool32_t vm, vuint32m1_t vd,
+                                    vuint32m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vunzipo_v_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 8 x i32> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vunzipo_v_u32m2_mu(vbool16_t vm, vuint32m2_t vd,
+                                    vuint32m4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vunzipo_v_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 16 x i32> [[VS]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vunzipo_v_u32m4_mu(vbool8_t vm, vuint32m4_t vd, vuint32m8_t vs,
+                                    size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 1 x i64> @test_vunzipo_v_u64m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 1 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64.i64(<vscale x 1 x i64> [[VD]], <vscale x 2 x i64> [[VS]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vunzipo_v_u64m1_mu(vbool64_t vm, vuint64m1_t vd,
+                                    vuint64m2_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vunzipo_v_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 4 x i64> [[VS]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vunzipo_v_u64m2_mu(vbool32_t vm, vuint64m2_t vd,
+                                    vuint64m4_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vunzipo_v_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 8 x i64> [[VS:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 8 x i64> [[VS]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vunzipo_v_u64m4_mu(vbool16_t vm, vuint64m4_t vd,
+                                    vuint64m8_t vs, size_t vl) {
+  return __riscv_vunzipo_mu(vm, vd, vs, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vzip.c b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vzip.c
new file mode 100644
index 0000000000000..312be0dc4055e
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics-autogenerated/zvzip/policy/overloaded/vzip.c
@@ -0,0 +1,2189 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --version 2
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -target-feature +zfh \
+// RUN:   -target-feature +zvfh -target-feature +zvfbfmin -target-feature +zvfbfwma \
+// RUN:   -target-feature +zve64x -target-feature +zve64d \
+// RUN:   -target-feature +experimental-zvzip -disable-O0-optnone \
+// RUN:   -emit-llvm %s -o - | opt -S -passes=mem2reg | \
+// RUN:   FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0:[0-9]+]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tu(vfloat16mf2_t vd, vfloat16mf4_t vs2,
+                                     vfloat16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tu(vfloat16m1_t vd, vfloat16mf2_t vs2,
+                                   vfloat16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tu(vfloat16m2_t vd, vfloat16m1_t vs2,
+                                   vfloat16m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tu(vfloat16m4_t vd, vfloat16m2_t vs2,
+                                   vfloat16m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tu(vfloat16m8_t vd, vfloat16m4_t vs2,
+                                   vfloat16m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tu(vfloat32m1_t vd, vfloat32mf2_t vs2,
+                                   vfloat32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tu(vfloat32m2_t vd, vfloat32m1_t vs2,
+                                   vfloat32m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tu(vfloat32m4_t vd, vfloat32m2_t vs2,
+                                   vfloat32m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tu(vfloat32m8_t vd, vfloat32m4_t vs2,
+                                   vfloat32m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tu(vfloat64m2_t vd, vfloat64m1_t vs2,
+                                   vfloat64m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tu(vfloat64m4_t vd, vfloat64m2_t vs2,
+                                   vfloat64m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tu(vfloat64m8_t vd, vfloat64m4_t vs2,
+                                   vfloat64m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tu(vint8mf4_t vd, vint8mf8_t vs2, vint8mf8_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tu(vint8mf2_t vd, vint8mf4_t vs2, vint8mf4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tu(vint8m1_t vd, vint8mf2_t vs2, vint8mf2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tu(vint8m2_t vd, vint8m1_t vs2, vint8m1_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tu(vint8m4_t vd, vint8m2_t vs2, vint8m2_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tu(vint8m8_t vd, vint8m4_t vs2, vint8m4_t vs1,
+                               size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tu(vint16mf2_t vd, vint16mf4_t vs2,
+                                   vint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tu(vint16m1_t vd, vint16mf2_t vs2,
+                                 vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tu(vint16m2_t vd, vint16m1_t vs2, vint16m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tu(vint16m4_t vd, vint16m2_t vs2, vint16m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tu(vint16m8_t vd, vint16m4_t vs2, vint16m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tu(vint32m1_t vd, vint32mf2_t vs2,
+                                 vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tu(vint32m2_t vd, vint32m1_t vs2, vint32m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tu(vint32m4_t vd, vint32m2_t vs2, vint32m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tu(vint32m8_t vd, vint32m4_t vs2, vint32m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tu(vint64m2_t vd, vint64m1_t vs2, vint64m1_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tu(vint64m4_t vd, vint64m2_t vs2, vint64m2_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tu(vint64m8_t vd, vint64m4_t vs2, vint64m4_t vs1,
+                                 size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tu(vuint8mf4_t vd, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tu(vuint8mf2_t vd, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tu(vuint8m1_t vd, vuint8mf2_t vs2, vuint8mf2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tu(vuint8m2_t vd, vuint8m1_t vs2, vuint8m1_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tu(vuint8m4_t vd, vuint8m2_t vs2, vuint8m2_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tu
+// CHECK-RV64-SAME: (<vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tu(vuint8m8_t vd, vuint8m4_t vs2, vuint8m4_t vs1,
+                                size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tu(vuint16mf2_t vd, vuint16mf4_t vs2,
+                                    vuint16mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tu(vuint16m1_t vd, vuint16mf2_t vs2,
+                                  vuint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tu(vuint16m2_t vd, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tu(vuint16m4_t vd, vuint16m2_t vs2,
+                                  vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tu
+// CHECK-RV64-SAME: (<vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tu(vuint16m8_t vd, vuint16m4_t vs2,
+                                  vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tu(vuint32m1_t vd, vuint32mf2_t vs2,
+                                  vuint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tu(vuint32m2_t vd, vuint32m1_t vs2,
+                                  vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tu(vuint32m4_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tu
+// CHECK-RV64-SAME: (<vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tu(vuint32m8_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tu
+// CHECK-RV64-SAME: (<vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tu(vuint64m2_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tu
+// CHECK-RV64-SAME: (<vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tu(vuint64m4_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tu
+// CHECK-RV64-SAME: (<vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], i64 [[VL]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tu(vuint64m8_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_tu(vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tum(vbool64_t vm, vfloat16mf2_t vd,
+                                      vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tum(vbool32_t vm, vfloat16m1_t vd,
+                                    vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tum(vbool16_t vm, vfloat16m2_t vd,
+                                    vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tum(vbool8_t vm, vfloat16m4_t vd,
+                                    vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tum(vbool4_t vm, vfloat16m8_t vd,
+                                    vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tum(vbool64_t vm, vfloat32m1_t vd,
+                                    vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tum(vbool32_t vm, vfloat32m2_t vd,
+                                    vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tum(vbool16_t vm, vfloat32m4_t vd,
+                                    vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tum(vbool8_t vm, vfloat32m8_t vd,
+                                    vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tum(vbool64_t vm, vfloat64m2_t vd,
+                                    vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tum(vbool32_t vm, vfloat64m4_t vd,
+                                    vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tum(vbool16_t vm, vfloat64m8_t vd,
+                                    vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tum(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                  vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tum(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                  vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tum(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                                vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tum(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                                vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tum(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                                vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tum(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                                vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tum(vbool64_t vm, vint16mf2_t vd,
+                                    vint16mf4_t vs2, vint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tum(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                  vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tum(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                  vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tum(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                  vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tum(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                  vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tum(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                  vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tum(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                  vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tum(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                  vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tum(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                  vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tum(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                  vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tum(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                  vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tum(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                  vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tum(vbool64_t vm, vuint8mf4_t vd,
+                                   vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tum(vbool32_t vm, vuint8mf2_t vd,
+                                   vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tum(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                 vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tum(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                 vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tum(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                 vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tum
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tum(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                 vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tum(vbool64_t vm, vuint16mf2_t vd,
+                                     vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tum(vbool32_t vm, vuint16m1_t vd,
+                                   vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tum(vbool16_t vm, vuint16m2_t vd,
+                                   vuint16m1_t vs2, vuint16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tum(vbool8_t vm, vuint16m4_t vd, vuint16m2_t vs2,
+                                   vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tum
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tum(vbool4_t vm, vuint16m8_t vd, vuint16m4_t vs2,
+                                   vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tum(vbool64_t vm, vuint32m1_t vd,
+                                   vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tum(vbool32_t vm, vuint32m2_t vd,
+                                   vuint32m1_t vs2, vuint32m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tum(vbool16_t vm, vuint32m4_t vd,
+                                   vuint32m2_t vs2, vuint32m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tum
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tum(vbool8_t vm, vuint32m8_t vd, vuint32m4_t vs2,
+                                   vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tum
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tum(vbool64_t vm, vuint64m2_t vd,
+                                   vuint64m1_t vs2, vuint64m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tum
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tum(vbool32_t vm, vuint64m4_t vd,
+                                   vuint64m2_t vs2, vuint64m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tum
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tum(vbool16_t vm, vuint64m8_t vd,
+                                   vuint64m4_t vs2, vuint64m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_tum(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_tumu(vbool64_t vm, vfloat16mf2_t vd,
+                                       vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                       size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_tumu(vbool32_t vm, vfloat16m1_t vd,
+                                     vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_tumu(vbool16_t vm, vfloat16m2_t vd,
+                                     vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_tumu(vbool8_t vm, vfloat16m4_t vd,
+                                     vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_tumu(vbool4_t vm, vfloat16m8_t vd,
+                                     vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_tumu(vbool64_t vm, vfloat32m1_t vd,
+                                     vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_tumu(vbool32_t vm, vfloat32m2_t vd,
+                                     vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_tumu(vbool16_t vm, vfloat32m4_t vd,
+                                     vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_tumu(vbool8_t vm, vfloat32m8_t vd,
+                                     vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_tumu(vbool64_t vm, vfloat64m2_t vd,
+                                     vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_tumu(vbool32_t vm, vfloat64m4_t vd,
+                                     vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_tumu(vbool16_t vm, vfloat64m8_t vd,
+                                     vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_tumu(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                   vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_tumu(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                   vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_tumu(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                                 vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_tumu(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                                 vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_tumu(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                                 vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_tumu(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                                 vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_tumu(vbool64_t vm, vint16mf2_t vd,
+                                     vint16mf4_t vs2, vint16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_tumu(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                   vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_tumu(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                   vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_tumu(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                   vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_tumu(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                   vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_tumu(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                   vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_tumu(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                   vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_tumu(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                   vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_tumu(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                   vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_tumu(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                   vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_tumu(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                   vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_tumu(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                   vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_tumu(vbool64_t vm, vuint8mf4_t vd,
+                                    vuint8mf8_t vs2, vuint8mf8_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_tumu(vbool32_t vm, vuint8mf2_t vd,
+                                    vuint8mf4_t vs2, vuint8mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_tumu(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                  vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_tumu(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                  vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_tumu(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                  vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_tumu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_tumu(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                  vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_tumu(vbool64_t vm, vuint16mf2_t vd,
+                                      vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                      size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_tumu(vbool32_t vm, vuint16m1_t vd,
+                                    vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_tumu(vbool16_t vm, vuint16m2_t vd,
+                                    vuint16m1_t vs2, vuint16m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_tumu(vbool8_t vm, vuint16m4_t vd,
+                                    vuint16m2_t vs2, vuint16m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_tumu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_tumu(vbool4_t vm, vuint16m8_t vd,
+                                    vuint16m4_t vs2, vuint16m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_tumu(vbool64_t vm, vuint32m1_t vd,
+                                    vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_tumu(vbool32_t vm, vuint32m2_t vd,
+                                    vuint32m1_t vs2, vuint32m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_tumu(vbool16_t vm, vuint32m4_t vd,
+                                    vuint32m2_t vs2, vuint32m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_tumu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_tumu(vbool8_t vm, vuint32m8_t vd,
+                                    vuint32m4_t vs2, vuint32m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_tumu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_tumu(vbool64_t vm, vuint64m2_t vd,
+                                    vuint64m1_t vs2, vuint64m1_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_tumu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_tumu(vbool32_t vm, vuint64m4_t vd,
+                                    vuint64m2_t vs2, vuint64m2_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_tumu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_tumu(vbool16_t vm, vuint64m8_t vd,
+                                    vuint64m4_t vs2, vuint64m4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_tumu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x half> @test_vzip_vv_f16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x half> [[VD:%.*]], <vscale x 1 x half> [[VS2:%.*]], <vscale x 1 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16.i64(<vscale x 2 x half> [[VD]], <vscale x 1 x half> [[VS2]], <vscale x 1 x half> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x half> [[TMP0]]
+//
+vfloat16mf2_t test_vzip_vv_f16mf2_mu(vbool64_t vm, vfloat16mf2_t vd,
+                                     vfloat16mf4_t vs2, vfloat16mf4_t vs1,
+                                     size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x half> @test_vzip_vv_f16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x half> [[VD:%.*]], <vscale x 2 x half> [[VS2:%.*]], <vscale x 2 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16.i64(<vscale x 4 x half> [[VD]], <vscale x 2 x half> [[VS2]], <vscale x 2 x half> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x half> [[TMP0]]
+//
+vfloat16m1_t test_vzip_vv_f16m1_mu(vbool32_t vm, vfloat16m1_t vd,
+                                   vfloat16mf2_t vs2, vfloat16mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x half> @test_vzip_vv_f16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x half> [[VD:%.*]], <vscale x 4 x half> [[VS2:%.*]], <vscale x 4 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16.i64(<vscale x 8 x half> [[VD]], <vscale x 4 x half> [[VS2]], <vscale x 4 x half> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x half> [[TMP0]]
+//
+vfloat16m2_t test_vzip_vv_f16m2_mu(vbool16_t vm, vfloat16m2_t vd,
+                                   vfloat16m1_t vs2, vfloat16m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x half> @test_vzip_vv_f16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x half> [[VD:%.*]], <vscale x 8 x half> [[VS2:%.*]], <vscale x 8 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16.i64(<vscale x 16 x half> [[VD]], <vscale x 8 x half> [[VS2]], <vscale x 8 x half> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x half> [[TMP0]]
+//
+vfloat16m4_t test_vzip_vv_f16m4_mu(vbool8_t vm, vfloat16m4_t vd,
+                                   vfloat16m2_t vs2, vfloat16m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x half> @test_vzip_vv_f16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x half> [[VD:%.*]], <vscale x 16 x half> [[VS2:%.*]], <vscale x 16 x half> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16.i64(<vscale x 32 x half> [[VD]], <vscale x 16 x half> [[VS2]], <vscale x 16 x half> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x half> [[TMP0]]
+//
+vfloat16m8_t test_vzip_vv_f16m8_mu(vbool4_t vm, vfloat16m8_t vd,
+                                   vfloat16m4_t vs2, vfloat16m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x float> @test_vzip_vv_f32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x float> [[VD:%.*]], <vscale x 1 x float> [[VS2:%.*]], <vscale x 1 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32.i64(<vscale x 2 x float> [[VD]], <vscale x 1 x float> [[VS2]], <vscale x 1 x float> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
+//
+vfloat32m1_t test_vzip_vv_f32m1_mu(vbool64_t vm, vfloat32m1_t vd,
+                                   vfloat32mf2_t vs2, vfloat32mf2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x float> @test_vzip_vv_f32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x float> [[VD:%.*]], <vscale x 2 x float> [[VS2:%.*]], <vscale x 2 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32.i64(<vscale x 4 x float> [[VD]], <vscale x 2 x float> [[VS2]], <vscale x 2 x float> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
+//
+vfloat32m2_t test_vzip_vv_f32m2_mu(vbool32_t vm, vfloat32m2_t vd,
+                                   vfloat32m1_t vs2, vfloat32m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x float> @test_vzip_vv_f32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x float> [[VD:%.*]], <vscale x 4 x float> [[VS2:%.*]], <vscale x 4 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32.i64(<vscale x 8 x float> [[VD]], <vscale x 4 x float> [[VS2]], <vscale x 4 x float> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
+//
+vfloat32m4_t test_vzip_vv_f32m4_mu(vbool16_t vm, vfloat32m4_t vd,
+                                   vfloat32m2_t vs2, vfloat32m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x float> @test_vzip_vv_f32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x float> [[VD:%.*]], <vscale x 8 x float> [[VS2:%.*]], <vscale x 8 x float> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32.i64(<vscale x 16 x float> [[VD]], <vscale x 8 x float> [[VS2]], <vscale x 8 x float> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
+//
+vfloat32m8_t test_vzip_vv_f32m8_mu(vbool8_t vm, vfloat32m8_t vd,
+                                   vfloat32m4_t vs2, vfloat32m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x double> @test_vzip_vv_f64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x double> [[VD:%.*]], <vscale x 1 x double> [[VS2:%.*]], <vscale x 1 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64.i64(<vscale x 2 x double> [[VD]], <vscale x 1 x double> [[VS2]], <vscale x 1 x double> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
+//
+vfloat64m2_t test_vzip_vv_f64m2_mu(vbool64_t vm, vfloat64m2_t vd,
+                                   vfloat64m1_t vs2, vfloat64m1_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x double> @test_vzip_vv_f64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x double> [[VD:%.*]], <vscale x 2 x double> [[VS2:%.*]], <vscale x 2 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64.i64(<vscale x 4 x double> [[VD]], <vscale x 2 x double> [[VS2]], <vscale x 2 x double> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
+//
+vfloat64m4_t test_vzip_vv_f64m4_mu(vbool32_t vm, vfloat64m4_t vd,
+                                   vfloat64m2_t vs2, vfloat64m2_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x double> @test_vzip_vv_f64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x double> [[VD:%.*]], <vscale x 4 x double> [[VS2:%.*]], <vscale x 4 x double> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64.i64(<vscale x 8 x double> [[VD]], <vscale x 4 x double> [[VS2]], <vscale x 4 x double> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
+//
+vfloat64m8_t test_vzip_vv_f64m8_mu(vbool16_t vm, vfloat64m8_t vd,
+                                   vfloat64m4_t vs2, vfloat64m4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_i8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vzip_vv_i8mf4_mu(vbool64_t vm, vint8mf4_t vd, vint8mf8_t vs2,
+                                 vint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_i8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vzip_vv_i8mf2_mu(vbool32_t vm, vint8mf2_t vd, vint8mf4_t vs2,
+                                 vint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_i8m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vzip_vv_i8m1_mu(vbool16_t vm, vint8m1_t vd, vint8mf2_t vs2,
+                               vint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_i8m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vzip_vv_i8m2_mu(vbool8_t vm, vint8m2_t vd, vint8m1_t vs2,
+                               vint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_i8m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vzip_vv_i8m4_mu(vbool4_t vm, vint8m4_t vd, vint8m2_t vs2,
+                               vint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_i8m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vzip_vv_i8m8_mu(vbool2_t vm, vint8m8_t vd, vint8m4_t vs2,
+                               vint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_i16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vzip_vv_i16mf2_mu(vbool64_t vm, vint16mf2_t vd,
+                                   vint16mf4_t vs2, vint16mf4_t vs1,
+                                   size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_i16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vzip_vv_i16m1_mu(vbool32_t vm, vint16m1_t vd, vint16mf2_t vs2,
+                                 vint16mf2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_i16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vzip_vv_i16m2_mu(vbool16_t vm, vint16m2_t vd, vint16m1_t vs2,
+                                 vint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_i16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vzip_vv_i16m4_mu(vbool8_t vm, vint16m4_t vd, vint16m2_t vs2,
+                                 vint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_i16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vzip_vv_i16m8_mu(vbool4_t vm, vint16m8_t vd, vint16m4_t vs2,
+                                 vint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_i32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vzip_vv_i32m1_mu(vbool64_t vm, vint32m1_t vd, vint32mf2_t vs2,
+                                 vint32mf2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_i32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vzip_vv_i32m2_mu(vbool32_t vm, vint32m2_t vd, vint32m1_t vs2,
+                                 vint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_i32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vzip_vv_i32m4_mu(vbool16_t vm, vint32m4_t vd, vint32m2_t vs2,
+                                 vint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_i32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vzip_vv_i32m8_mu(vbool8_t vm, vint32m8_t vd, vint32m4_t vs2,
+                                 vint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_i64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vzip_vv_i64m2_mu(vbool64_t vm, vint64m2_t vd, vint64m1_t vs2,
+                                 vint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_i64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vzip_vv_i64m4_mu(vbool32_t vm, vint64m4_t vd, vint64m2_t vs2,
+                                 vint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_i64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vzip_vv_i64m8_mu(vbool16_t vm, vint64m8_t vd, vint64m4_t vs2,
+                                 vint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i8> @test_vzip_vv_u8mf4_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i8> [[VD:%.*]], <vscale x 1 x i8> [[VS2:%.*]], <vscale x 1 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8.i64(<vscale x 2 x i8> [[VD]], <vscale x 1 x i8> [[VS2]], <vscale x 1 x i8> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vzip_vv_u8mf4_mu(vbool64_t vm, vuint8mf4_t vd, vuint8mf8_t vs2,
+                                  vuint8mf8_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i8> @test_vzip_vv_u8mf2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i8> [[VD:%.*]], <vscale x 2 x i8> [[VS2:%.*]], <vscale x 2 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8.i64(<vscale x 4 x i8> [[VD]], <vscale x 2 x i8> [[VS2]], <vscale x 2 x i8> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vzip_vv_u8mf2_mu(vbool32_t vm, vuint8mf2_t vd, vuint8mf4_t vs2,
+                                  vuint8mf4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i8> @test_vzip_vv_u8m1_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i8> [[VD:%.*]], <vscale x 4 x i8> [[VS2:%.*]], <vscale x 4 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8.i64(<vscale x 8 x i8> [[VD]], <vscale x 4 x i8> [[VS2]], <vscale x 4 x i8> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vzip_vv_u8m1_mu(vbool16_t vm, vuint8m1_t vd, vuint8mf2_t vs2,
+                                vuint8mf2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i8> @test_vzip_vv_u8m2_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i8> [[VD:%.*]], <vscale x 8 x i8> [[VS2:%.*]], <vscale x 8 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8.i64(<vscale x 16 x i8> [[VD]], <vscale x 8 x i8> [[VS2]], <vscale x 8 x i8> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vzip_vv_u8m2_mu(vbool8_t vm, vuint8m2_t vd, vuint8m1_t vs2,
+                                vuint8m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i8> @test_vzip_vv_u8m4_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i8> [[VD:%.*]], <vscale x 16 x i8> [[VS2:%.*]], <vscale x 16 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8.i64(<vscale x 32 x i8> [[VD]], <vscale x 16 x i8> [[VS2]], <vscale x 16 x i8> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vzip_vv_u8m4_mu(vbool4_t vm, vuint8m4_t vd, vuint8m2_t vs2,
+                                vuint8m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 64 x i8> @test_vzip_vv_u8m8_mu
+// CHECK-RV64-SAME: (<vscale x 32 x i1> [[VM:%.*]], <vscale x 64 x i8> [[VD:%.*]], <vscale x 32 x i8> [[VS2:%.*]], <vscale x 32 x i8> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8.i64(<vscale x 64 x i8> [[VD]], <vscale x 32 x i8> [[VS2]], <vscale x 32 x i8> [[VS1]], <vscale x 32 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vzip_vv_u8m8_mu(vbool2_t vm, vuint8m8_t vd, vuint8m4_t vs2,
+                                vuint8m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i16> @test_vzip_vv_u16mf2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i16> [[VD:%.*]], <vscale x 1 x i16> [[VS2:%.*]], <vscale x 1 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16.i64(<vscale x 2 x i16> [[VD]], <vscale x 1 x i16> [[VS2]], <vscale x 1 x i16> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vzip_vv_u16mf2_mu(vbool64_t vm, vuint16mf2_t vd,
+                                    vuint16mf4_t vs2, vuint16mf4_t vs1,
+                                    size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i16> @test_vzip_vv_u16m1_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i16> [[VD:%.*]], <vscale x 2 x i16> [[VS2:%.*]], <vscale x 2 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16.i64(<vscale x 4 x i16> [[VD]], <vscale x 2 x i16> [[VS2]], <vscale x 2 x i16> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vzip_vv_u16m1_mu(vbool32_t vm, vuint16m1_t vd,
+                                  vuint16mf2_t vs2, vuint16mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i16> @test_vzip_vv_u16m2_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i16> [[VD:%.*]], <vscale x 4 x i16> [[VS2:%.*]], <vscale x 4 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16.i64(<vscale x 8 x i16> [[VD]], <vscale x 4 x i16> [[VS2]], <vscale x 4 x i16> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vzip_vv_u16m2_mu(vbool16_t vm, vuint16m2_t vd, vuint16m1_t vs2,
+                                  vuint16m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i16> @test_vzip_vv_u16m4_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i16> [[VD:%.*]], <vscale x 8 x i16> [[VS2:%.*]], <vscale x 8 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16.i64(<vscale x 16 x i16> [[VD]], <vscale x 8 x i16> [[VS2]], <vscale x 8 x i16> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vzip_vv_u16m4_mu(vbool8_t vm, vuint16m4_t vd, vuint16m2_t vs2,
+                                  vuint16m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 32 x i16> @test_vzip_vv_u16m8_mu
+// CHECK-RV64-SAME: (<vscale x 16 x i1> [[VM:%.*]], <vscale x 32 x i16> [[VD:%.*]], <vscale x 16 x i16> [[VS2:%.*]], <vscale x 16 x i16> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16.i64(<vscale x 32 x i16> [[VD]], <vscale x 16 x i16> [[VS2]], <vscale x 16 x i16> [[VS1]], <vscale x 16 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vzip_vv_u16m8_mu(vbool4_t vm, vuint16m8_t vd, vuint16m4_t vs2,
+                                  vuint16m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i32> @test_vzip_vv_u32m1_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], <vscale x 1 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32.i64(<vscale x 2 x i32> [[VD]], <vscale x 1 x i32> [[VS2]], <vscale x 1 x i32> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vzip_vv_u32m1_mu(vbool64_t vm, vuint32m1_t vd,
+                                  vuint32mf2_t vs2, vuint32mf2_t vs1,
+                                  size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i32> @test_vzip_vv_u32m2_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], <vscale x 2 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32.i64(<vscale x 4 x i32> [[VD]], <vscale x 2 x i32> [[VS2]], <vscale x 2 x i32> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vzip_vv_u32m2_mu(vbool32_t vm, vuint32m2_t vd, vuint32m1_t vs2,
+                                  vuint32m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i32> @test_vzip_vv_u32m4_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], <vscale x 4 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32.i64(<vscale x 8 x i32> [[VD]], <vscale x 4 x i32> [[VS2]], <vscale x 4 x i32> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vzip_vv_u32m4_mu(vbool16_t vm, vuint32m4_t vd, vuint32m2_t vs2,
+                                  vuint32m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 16 x i32> @test_vzip_vv_u32m8_mu
+// CHECK-RV64-SAME: (<vscale x 8 x i1> [[VM:%.*]], <vscale x 16 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], <vscale x 8 x i32> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32.i64(<vscale x 16 x i32> [[VD]], <vscale x 8 x i32> [[VS2]], <vscale x 8 x i32> [[VS1]], <vscale x 8 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vzip_vv_u32m8_mu(vbool8_t vm, vuint32m8_t vd, vuint32m4_t vs2,
+                                  vuint32m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 2 x i64> @test_vzip_vv_u64m2_mu
+// CHECK-RV64-SAME: (<vscale x 1 x i1> [[VM:%.*]], <vscale x 2 x i64> [[VD:%.*]], <vscale x 1 x i64> [[VS2:%.*]], <vscale x 1 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64.i64(<vscale x 2 x i64> [[VD]], <vscale x 1 x i64> [[VS2]], <vscale x 1 x i64> [[VS1]], <vscale x 1 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vzip_vv_u64m2_mu(vbool64_t vm, vuint64m2_t vd, vuint64m1_t vs2,
+                                  vuint64m1_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 4 x i64> @test_vzip_vv_u64m4_mu
+// CHECK-RV64-SAME: (<vscale x 2 x i1> [[VM:%.*]], <vscale x 4 x i64> [[VD:%.*]], <vscale x 2 x i64> [[VS2:%.*]], <vscale x 2 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64.i64(<vscale x 4 x i64> [[VD]], <vscale x 2 x i64> [[VS2]], <vscale x 2 x i64> [[VS1]], <vscale x 2 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vzip_vv_u64m4_mu(vbool32_t vm, vuint64m4_t vd, vuint64m2_t vs2,
+                                  vuint64m2_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
+
+// CHECK-RV64-LABEL: define dso_local <vscale x 8 x i64> @test_vzip_vv_u64m8_mu
+// CHECK-RV64-SAME: (<vscale x 4 x i1> [[VM:%.*]], <vscale x 8 x i64> [[VD:%.*]], <vscale x 4 x i64> [[VS2:%.*]], <vscale x 4 x i64> [[VS1:%.*]], i64 noundef [[VL:%.*]]) #[[ATTR0]] {
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64.i64(<vscale x 8 x i64> [[VD]], <vscale x 4 x i64> [[VS2]], <vscale x 4 x i64> [[VS1]], <vscale x 4 x i1> [[VM]], i64 [[VL]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vzip_vv_u64m8_mu(vbool16_t vm, vuint64m8_t vd, vuint64m4_t vs2,
+                                  vuint64m4_t vs1, size_t vl) {
+  return __riscv_vzip_mu(vm, vd, vs2, vs1, vl);
+}
diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index 32de1a10a4fc3..caf8fa6f9be81 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1940,6 +1940,59 @@ let TargetPrefix = "riscv" in {
   defm vwabdau : RISCVTernaryWide;
 } // TargetPrefix = "riscv"
 
+//===----------------------------------------------------------------------===//
+// Zvzip - Reordering Structured Data in Vector Registers
+let TargetPrefix = "riscv" in {
+  multiclass RISCVZip {
+    // Input: (passthru, vector_in, vector_in, vl)
+    def "int_riscv_" # NAME :
+      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                            [LLVMMatchType<0>, llvm_anyvector_ty,
+                             LLVMMatchType<1>, llvm_anyint_ty],
+                            [IntrNoMem]>, RISCVVIntrinsic {
+      let VLOperand = 3;
+    }
+
+    // Input: (maskedoff, vector_in, vector_in, mask, vl, policy)
+    def "int_riscv_" # NAME # "_mask" :
+      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                            [LLVMMatchType<0>, llvm_anyvector_ty,
+                             LLVMMatchType<1>,
+                             LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>,
+                             llvm_anyint_ty, LLVMMatchType<2>],
+                            [ImmArg<ArgIndex<5>>, IntrNoMem]>, RISCVVIntrinsic {
+      let VLOperand = 4;
+    }
+  }
+
+  multiclass RISCVUnzip {
+    // Input: (passthru, vector_in, vl)
+    def "int_riscv_" # NAME :
+      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                            [LLVMMatchType<0>, llvm_anyvector_ty,
+                             llvm_anyint_ty],
+                            [IntrNoMem]>, RISCVVIntrinsic {
+      let VLOperand = 2;
+    }
+
+    // Input: (maskedoff, vector_in, mask, vl, policy)
+    def "int_riscv_" # NAME # "_mask" :
+      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
+                            [LLVMMatchType<0>, llvm_anyvector_ty,
+                             LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                             llvm_anyint_ty, LLVMMatchType<2>],
+                            [ImmArg<ArgIndex<4>>, IntrNoMem]>, RISCVVIntrinsic {
+      let VLOperand = 3;
+    }
+  }
+
+  defm vzip    : RISCVZip;
+  defm vunzipe : RISCVUnzip;
+  defm vunzipo : RISCVUnzip;
+  defm vpaire  : RISCVBinaryAAA;
+  defm vpairo  : RISCVBinaryAAA;
+}
+
 //===----------------------------------------------------------------------===//
 // Zvdot4a8i - Vector 4-element Dot Product of packed 8-bit Integers
 //
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
index 9fd88ee0ef104..bc838fbfd95a9 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
@@ -29,3 +29,119 @@ let Predicates = [HasStdExtZvzip], Constraints = "@earlyclobber $vd" in {
     def VPAIRO_VV : VALUVV<0b001111, OPMVV, "vpairo.vv">;
   }
 } // Predicates = [HasStdExtZvzip]
+
+defset list<VTypeInfoToWide> AllZvzipVectors = {
+  def : VTypeInfoToWide<VI8MF8,  VI8MF4>;
+  def : VTypeInfoToWide<VI8MF4,  VI8MF2>;
+  def : VTypeInfoToWide<VI8MF2,  VI8M1>;
+  def : VTypeInfoToWide<VI8M1,   VI8M2>;
+  def : VTypeInfoToWide<VI8M2,   VI8M4>;
+  def : VTypeInfoToWide<VI8M4,   VI8M8>;
+
+  def : VTypeInfoToWide<VI16MF4, VI16MF2>;
+  def : VTypeInfoToWide<VI16MF2, VI16M1>;
+  def : VTypeInfoToWide<VI16M1,  VI16M2>;
+  def : VTypeInfoToWide<VI16M2,  VI16M4>;
+  def : VTypeInfoToWide<VI16M4,  VI16M8>;
+
+  def : VTypeInfoToWide<VI32MF2, VI32M1>;
+  def : VTypeInfoToWide<VI32M1,  VI32M2>;
+  def : VTypeInfoToWide<VI32M2,  VI32M4>;
+  def : VTypeInfoToWide<VI32M4,  VI32M8>;
+
+  def : VTypeInfoToWide<VI64M1,  VI64M2>;
+  def : VTypeInfoToWide<VI64M2,  VI64M4>;
+  def : VTypeInfoToWide<VI64M4,  VI64M8>;
+
+  // Floating-point 16-bit
+  def : VTypeInfoToWide<VF16MF4, VF16MF2>;
+  def : VTypeInfoToWide<VF16MF2, VF16M1>;
+  def : VTypeInfoToWide<VF16M1,  VF16M2>;
+  def : VTypeInfoToWide<VF16M2,  VF16M4>;
+  def : VTypeInfoToWide<VF16M4,  VF16M8>;
+
+  // Floating-point 32-bit
+  def : VTypeInfoToWide<VF32MF2, VF32M1>;
+  def : VTypeInfoToWide<VF32M1,  VF32M2>;
+  def : VTypeInfoToWide<VF32M2,  VF32M4>;
+  def : VTypeInfoToWide<VF32M4,  VF32M8>;
+
+  // Floating-point 64-bit
+  def : VTypeInfoToWide<VF64M1,  VF64M2>;
+  def : VTypeInfoToWide<VF64M2,  VF64M4>;
+  def : VTypeInfoToWide<VF64M4,  VF64M8>;
+
+  // Floating-point bfloat16 (16-bit)
+  def : VTypeInfoToWide<VBF16MF4, VBF16MF2>;
+  def : VTypeInfoToWide<VBF16MF2, VBF16M1>;
+  def : VTypeInfoToWide<VBF16M1,  VBF16M2>;
+  def : VTypeInfoToWide<VBF16M2,  VBF16M4>;
+  def : VTypeInfoToWide<VBF16M4,  VBF16M8>;
+}
+
+multiclass VPseudoVZIP {
+  foreach m = MxListW in
+    defm "" : VPseudoBinaryW_VV<m, Commutable=0>,
+              SchedBinary<"WriteVIWALUV", "ReadVIWALUV", "ReadVIWALUV", m.MX,
+                          forcePassthruRead=true>;
+}
+
+multiclass VPseudoVUNZIP {
+  foreach m = MxListW in {
+    defvar mx = m.MX;
+    let VLMul = m.value in {
+      def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.wvrclass>,
+                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+      def "_V_" # mx # "_MASK" :
+        VPseudoUnaryMask<m.vrclass, m.wvrclass>,
+        RISCVMaskedPseudo<MaskIdx=2>,
+        SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+    }
+  }
+}
+
+let Predicates = [HasStdExtZvzip],
+    Constraints = "@earlyclobber $rd, $rd = $passthru" in {
+  defm PseudoVZIP    : VPseudoVZIP;
+  defm PseudoVUNZIPE : VPseudoVUNZIP;
+  defm PseudoVUNZIPO : VPseudoVUNZIP;
+  defm PseudoVPAIRE  : VPseudoVALU_VV;
+  defm PseudoVPAIRO  : VPseudoVALU_VV;
+}
+
+multiclass VPatVUNZIPIntrinsic<string intrinsic_name, string instruction_name> {
+  foreach VtiToWti = AllZvzipVectors in {
+    defvar vti = VtiToWti.Vti;
+    defvar wti = VtiToWti.Wti;
+    let Predicates = !listconcat(GetVTypePredicates<wti>.Predicates, [HasStdExtZvzip]) in {
+      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic_name)
+                              (vti.Vector vti.RegClass:$passthru),
+                              (wti.Vector wti.RegClass:$rs2),
+                              VLOpFrag)),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
+                  vti.RegClass:$passthru,
+                  wti.RegClass:$rs2,
+                  GPR:$vl,
+                  vti.Log2SEW,
+                  TAIL_AGNOSTIC)>;
+      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic_name # "_mask")
+                              (vti.Vector vti.RegClass:$passthru),
+                              (wti.Vector wti.RegClass:$rs2),
+                              (vti.Mask VMV0:$vm),
+                              VLOpFrag, (XLenVT timm:$policy))),
+                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
+                  vti.RegClass:$passthru,
+                  wti.RegClass:$rs2,
+                  (vti.Mask VMV0:$vm),
+                  GPR:$vl,
+                  vti.Log2SEW,
+                  timm:$policy)>;
+    }
+  }
+}
+
+defm : VPatVUNZIPIntrinsic<"int_riscv_vunzipe", "PseudoVUNZIPE">;
+defm : VPatVUNZIPIntrinsic<"int_riscv_vunzipo", "PseudoVUNZIPO">;
+defm : VPatBinaryV_VV<"int_riscv_vpaire", "PseudoVPAIRE", AllVectors>;
+defm : VPatBinaryV_VV<"int_riscv_vpairo", "PseudoVPAIRO", AllVectors>;
+defm : VPatBinaryW_VV<"int_riscv_vzip", "PseudoVZIP", AllZvzipVectors>;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpaire.ll b/llvm/test/CodeGen/RISCV/rvv/vpaire.ll
new file mode 100644
index 0000000000000..27c5d361a3650
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vpaire.ll
@@ -0,0 +1,1115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @test_vpaire_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.iXLen(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @test_vpaire_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vpaire_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.iXLen(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vpaire_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vpaire_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.iXLen(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vpaire_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vpaire_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.iXLen(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vpaire_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vpaire_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.iXLen(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vpaire_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vpaire_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.iXLen(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vpaire_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vpaire_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.iXLen(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 64 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vpaire_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2, <vscale x 64 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 64 x i8> %arg2,
+    <vscale x 64 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @test_vpaire_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.iXLen(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @test_vpaire_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vpaire_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.iXLen(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vpaire_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vpaire_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.iXLen(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vpaire_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vpaire_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.iXLen(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vpaire_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vpaire_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.iXLen(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vpaire_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vpaire_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 32 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.iXLen(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 32 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vpaire_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 32 x i16> %arg2, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 32 x i16> %arg2,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @test_vpaire_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.iXLen(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @test_vpaire_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vpaire_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.iXLen(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vpaire_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vpaire_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.iXLen(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vpaire_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vpaire_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.iXLen(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vpaire_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vpaire_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 16 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.iXLen(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 16 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vpaire_mask_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 16 x i32> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 16 x i32> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @test_vpaire_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.iXLen(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @test_vpaire_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vpaire_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.iXLen(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vpaire_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vpaire_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.iXLen(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vpaire_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vpaire_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 8 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.iXLen(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 8 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vpaire_mask_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 8 x i64> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 8 x i64> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i64> %a
+}
+
+; Floating-point types
+
+; f16
+define <vscale x 1 x half> @test_vpaire_f16mf4(<vscale x 1 x half> %passthru, <vscale x 1 x half> %a1, <vscale x 1 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.iXLen(
+    <vscale x 1 x half> %passthru,
+    <vscale x 1 x half> %a1,
+    <vscale x 1 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @test_vpaire_mask_f16mf4(<vscale x 1 x half> %passthru, <vscale x 1 x half> %a1, <vscale x 1 x half> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16(
+    <vscale x 1 x half> %passthru,
+    <vscale x 1 x half> %a1,
+    <vscale x 1 x half> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 2 x half> @test_vpaire_f16mf2(<vscale x 2 x half> %passthru, <vscale x 2 x half> %a1, <vscale x 2 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.iXLen(
+    <vscale x 2 x half> %passthru,
+    <vscale x 2 x half> %a1,
+    <vscale x 2 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @test_vpaire_mask_f16mf2(<vscale x 2 x half> %passthru, <vscale x 2 x half> %a1, <vscale x 2 x half> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16(
+    <vscale x 2 x half> %passthru,
+    <vscale x 2 x half> %a1,
+    <vscale x 2 x half> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 4 x half> @test_vpaire_f16m1(<vscale x 4 x half> %passthru, <vscale x 4 x half> %a1, <vscale x 4 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.iXLen(
+    <vscale x 4 x half> %passthru,
+    <vscale x 4 x half> %a1,
+    <vscale x 4 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @test_vpaire_mask_f16m1(<vscale x 4 x half> %passthru, <vscale x 4 x half> %a1, <vscale x 4 x half> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16(
+    <vscale x 4 x half> %passthru,
+    <vscale x 4 x half> %a1,
+    <vscale x 4 x half> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 8 x half> @test_vpaire_f16m2(<vscale x 8 x half> %passthru, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.iXLen(
+    <vscale x 8 x half> %passthru,
+    <vscale x 8 x half> %a1,
+    <vscale x 8 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @test_vpaire_mask_f16m2(<vscale x 8 x half> %passthru, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16(
+    <vscale x 8 x half> %passthru,
+    <vscale x 8 x half> %a1,
+    <vscale x 8 x half> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 16 x half> @test_vpaire_f16m4(<vscale x 16 x half> %passthru, <vscale x 16 x half> %a1, <vscale x 16 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.iXLen(
+    <vscale x 16 x half> %passthru,
+    <vscale x 16 x half> %a1,
+    <vscale x 16 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @test_vpaire_mask_f16m4(<vscale x 16 x half> %passthru, <vscale x 16 x half> %a1, <vscale x 16 x half> %a2, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16(
+    <vscale x 16 x half> %passthru,
+    <vscale x 16 x half> %a1,
+    <vscale x 16 x half> %a2,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 32 x half> @test_vpaire_f16m8(<vscale x 32 x half> %passthru, <vscale x 32 x half> %a1, <vscale x 32 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.iXLen(
+    <vscale x 32 x half> %passthru,
+    <vscale x 32 x half> %a1,
+    <vscale x 32 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @test_vpaire_mask_f16m8(<vscale x 32 x half> %passthru, <vscale x 32 x half> %a1, <vscale x 32 x half> %a2, <vscale x 32 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16(
+    <vscale x 32 x half> %passthru,
+    <vscale x 32 x half> %a1,
+    <vscale x 32 x half> %a2,
+    <vscale x 32 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 32 x half> %r
+}
+
+; f32
+define <vscale x 1 x float> @test_vpaire_f32mf2(<vscale x 1 x float> %passthru, <vscale x 1 x float> %a1, <vscale x 1 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.iXLen(
+    <vscale x 1 x float> %passthru,
+    <vscale x 1 x float> %a1,
+    <vscale x 1 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @test_vpaire_mask_f32mf2(<vscale x 1 x float> %passthru, <vscale x 1 x float> %a1, <vscale x 1 x float> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32(
+    <vscale x 1 x float> %passthru,
+    <vscale x 1 x float> %a1,
+    <vscale x 1 x float> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 2 x float> @test_vpaire_f32m1(<vscale x 2 x float> %passthru, <vscale x 2 x float> %a1, <vscale x 2 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.iXLen(
+    <vscale x 2 x float> %passthru,
+    <vscale x 2 x float> %a1,
+    <vscale x 2 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @test_vpaire_mask_f32m1(<vscale x 2 x float> %passthru, <vscale x 2 x float> %a1, <vscale x 2 x float> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32(
+    <vscale x 2 x float> %passthru,
+    <vscale x 2 x float> %a1,
+    <vscale x 2 x float> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 4 x float> @test_vpaire_f32m2(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a1, <vscale x 4 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.iXLen(
+    <vscale x 4 x float> %passthru,
+    <vscale x 4 x float> %a1,
+    <vscale x 4 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @test_vpaire_mask_f32m2(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a1, <vscale x 4 x float> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32(
+    <vscale x 4 x float> %passthru,
+    <vscale x 4 x float> %a1,
+    <vscale x 4 x float> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 8 x float> @test_vpaire_f32m4(<vscale x 8 x float> %passthru, <vscale x 8 x float> %a1, <vscale x 8 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.iXLen(
+    <vscale x 8 x float> %passthru,
+    <vscale x 8 x float> %a1,
+    <vscale x 8 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @test_vpaire_mask_f32m4(<vscale x 8 x float> %passthru, <vscale x 8 x float> %a1, <vscale x 8 x float> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32(
+    <vscale x 8 x float> %passthru,
+    <vscale x 8 x float> %a1,
+    <vscale x 8 x float> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 16 x float> @test_vpaire_f32m8(<vscale x 16 x float> %passthru, <vscale x 16 x float> %a1, <vscale x 16 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.iXLen(
+    <vscale x 16 x float> %passthru,
+    <vscale x 16 x float> %a1,
+    <vscale x 16 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @test_vpaire_mask_f32m8(<vscale x 16 x float> %passthru, <vscale x 16 x float> %a1, <vscale x 16 x float> %a2, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32(
+    <vscale x 16 x float> %passthru,
+    <vscale x 16 x float> %a1,
+    <vscale x 16 x float> %a2,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 16 x float> %r
+}
+
+; f64
+define <vscale x 1 x double> @test_vpaire_f64m1(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a1, <vscale x 1 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.iXLen(
+    <vscale x 1 x double> %passthru,
+    <vscale x 1 x double> %a1,
+    <vscale x 1 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @test_vpaire_mask_f64m1(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a1, <vscale x 1 x double> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64(
+    <vscale x 1 x double> %passthru,
+    <vscale x 1 x double> %a1,
+    <vscale x 1 x double> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 2 x double> @test_vpaire_f64m2(<vscale x 2 x double> %passthru, <vscale x 2 x double> %a1, <vscale x 2 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.iXLen(
+    <vscale x 2 x double> %passthru,
+    <vscale x 2 x double> %a1,
+    <vscale x 2 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @test_vpaire_mask_f64m2(<vscale x 2 x double> %passthru, <vscale x 2 x double> %a1, <vscale x 2 x double> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64(
+    <vscale x 2 x double> %passthru,
+    <vscale x 2 x double> %a1,
+    <vscale x 2 x double> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 4 x double> @test_vpaire_f64m4(<vscale x 4 x double> %passthru, <vscale x 4 x double> %a1, <vscale x 4 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.iXLen(
+    <vscale x 4 x double> %passthru,
+    <vscale x 4 x double> %a1,
+    <vscale x 4 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @test_vpaire_mask_f64m4(<vscale x 4 x double> %passthru, <vscale x 4 x double> %a1, <vscale x 4 x double> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64(
+    <vscale x 4 x double> %passthru,
+    <vscale x 4 x double> %a1,
+    <vscale x 4 x double> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 8 x double> @test_vpaire_f64m8(<vscale x 8 x double> %passthru, <vscale x 8 x double> %a1, <vscale x 8 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_f64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vpaire.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.iXLen(
+    <vscale x 8 x double> %passthru,
+    <vscale x 8 x double> %a1,
+    <vscale x 8 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @test_vpaire_mask_f64m8(<vscale x 8 x double> %passthru, <vscale x 8 x double> %a1, <vscale x 8 x double> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpaire_mask_f64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64(
+    <vscale x 8 x double> %passthru,
+    <vscale x 8 x double> %a1,
+    <vscale x 8 x double> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x double> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpairo.ll b/llvm/test/CodeGen/RISCV/rvv/vpairo.ll
new file mode 100644
index 0000000000000..f814e3d1207f4
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vpairo.ll
@@ -0,0 +1,1115 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @test_vpairo_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.iXLen(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @test_vpairo_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vpairo_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.iXLen(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vpairo_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vpairo_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.iXLen(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vpairo_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vpairo_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.iXLen(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vpairo_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vpairo_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.iXLen(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vpairo_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vpairo_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.iXLen(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vpairo_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vpairo_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.iXLen(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 64 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vpairo_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 64 x i8> %arg2, <vscale x 64 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8r.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 64 x i8> %arg2,
+    <vscale x 64 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 1 x i16> @test_vpairo_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.iXLen(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @test_vpairo_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vpairo_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.iXLen(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vpairo_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vpairo_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.iXLen(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vpairo_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vpairo_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.iXLen(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vpairo_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vpairo_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.iXLen(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vpairo_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vpairo_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 32 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.iXLen(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 32 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vpairo_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 32 x i16> %arg2, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 32 x i16> %arg2,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 1 x i32> @test_vpairo_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.iXLen(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @test_vpairo_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vpairo_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.iXLen(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vpairo_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vpairo_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.iXLen(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vpairo_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vpairo_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.iXLen(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vpairo_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vpairo_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 16 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.iXLen(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 16 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vpairo_mask_i32m8(<vscale x 16 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 16 x i32> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 16 x i32> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 1 x i64> @test_vpairo_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.iXLen(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @test_vpairo_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vpairo_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.iXLen(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vpairo_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vpairo_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.iXLen(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vpairo_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vpairo_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 8 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.iXLen(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 8 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vpairo_mask_i64m8(<vscale x 8 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 8 x i64> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 8 x i64> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i64> %a
+}
+
+; Floating-point types
+
+; f16
+define <vscale x 1 x half> @test_vpairo_f16mf4(<vscale x 1 x half> %passthru, <vscale x 1 x half> %a1, <vscale x 1 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.iXLen(
+    <vscale x 1 x half> %passthru,
+    <vscale x 1 x half> %a1,
+    <vscale x 1 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @test_vpairo_mask_f16mf4(<vscale x 1 x half> %passthru, <vscale x 1 x half> %a1, <vscale x 1 x half> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16(
+    <vscale x 1 x half> %passthru,
+    <vscale x 1 x half> %a1,
+    <vscale x 1 x half> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 2 x half> @test_vpairo_f16mf2(<vscale x 2 x half> %passthru, <vscale x 2 x half> %a1, <vscale x 2 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.iXLen(
+    <vscale x 2 x half> %passthru,
+    <vscale x 2 x half> %a1,
+    <vscale x 2 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @test_vpairo_mask_f16mf2(<vscale x 2 x half> %passthru, <vscale x 2 x half> %a1, <vscale x 2 x half> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16(
+    <vscale x 2 x half> %passthru,
+    <vscale x 2 x half> %a1,
+    <vscale x 2 x half> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 4 x half> @test_vpairo_f16m1(<vscale x 4 x half> %passthru, <vscale x 4 x half> %a1, <vscale x 4 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.iXLen(
+    <vscale x 4 x half> %passthru,
+    <vscale x 4 x half> %a1,
+    <vscale x 4 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @test_vpairo_mask_f16m1(<vscale x 4 x half> %passthru, <vscale x 4 x half> %a1, <vscale x 4 x half> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16(
+    <vscale x 4 x half> %passthru,
+    <vscale x 4 x half> %a1,
+    <vscale x 4 x half> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 8 x half> @test_vpairo_f16m2(<vscale x 8 x half> %passthru, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.iXLen(
+    <vscale x 8 x half> %passthru,
+    <vscale x 8 x half> %a1,
+    <vscale x 8 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @test_vpairo_mask_f16m2(<vscale x 8 x half> %passthru, <vscale x 8 x half> %a1, <vscale x 8 x half> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16(
+    <vscale x 8 x half> %passthru,
+    <vscale x 8 x half> %a1,
+    <vscale x 8 x half> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 16 x half> @test_vpairo_f16m4(<vscale x 16 x half> %passthru, <vscale x 16 x half> %a1, <vscale x 16 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.iXLen(
+    <vscale x 16 x half> %passthru,
+    <vscale x 16 x half> %a1,
+    <vscale x 16 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @test_vpairo_mask_f16m4(<vscale x 16 x half> %passthru, <vscale x 16 x half> %a1, <vscale x 16 x half> %a2, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16(
+    <vscale x 16 x half> %passthru,
+    <vscale x 16 x half> %a1,
+    <vscale x 16 x half> %a2,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 32 x half> @test_vpairo_f16m8(<vscale x 32 x half> %passthru, <vscale x 32 x half> %a1, <vscale x 32 x half> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.iXLen(
+    <vscale x 32 x half> %passthru,
+    <vscale x 32 x half> %a1,
+    <vscale x 32 x half> %a2,
+    iXLen %vl)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @test_vpairo_mask_f16m8(<vscale x 32 x half> %passthru, <vscale x 32 x half> %a1, <vscale x 32 x half> %a2, <vscale x 32 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re16.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16(
+    <vscale x 32 x half> %passthru,
+    <vscale x 32 x half> %a1,
+    <vscale x 32 x half> %a2,
+    <vscale x 32 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 32 x half> %r
+}
+
+; f32
+define <vscale x 1 x float> @test_vpairo_f32mf2(<vscale x 1 x float> %passthru, <vscale x 1 x float> %a1, <vscale x 1 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.iXLen(
+    <vscale x 1 x float> %passthru,
+    <vscale x 1 x float> %a1,
+    <vscale x 1 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @test_vpairo_mask_f32mf2(<vscale x 1 x float> %passthru, <vscale x 1 x float> %a1, <vscale x 1 x float> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32(
+    <vscale x 1 x float> %passthru,
+    <vscale x 1 x float> %a1,
+    <vscale x 1 x float> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 2 x float> @test_vpairo_f32m1(<vscale x 2 x float> %passthru, <vscale x 2 x float> %a1, <vscale x 2 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.iXLen(
+    <vscale x 2 x float> %passthru,
+    <vscale x 2 x float> %a1,
+    <vscale x 2 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @test_vpairo_mask_f32m1(<vscale x 2 x float> %passthru, <vscale x 2 x float> %a1, <vscale x 2 x float> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32(
+    <vscale x 2 x float> %passthru,
+    <vscale x 2 x float> %a1,
+    <vscale x 2 x float> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 4 x float> @test_vpairo_f32m2(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a1, <vscale x 4 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.iXLen(
+    <vscale x 4 x float> %passthru,
+    <vscale x 4 x float> %a1,
+    <vscale x 4 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @test_vpairo_mask_f32m2(<vscale x 4 x float> %passthru, <vscale x 4 x float> %a1, <vscale x 4 x float> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32(
+    <vscale x 4 x float> %passthru,
+    <vscale x 4 x float> %a1,
+    <vscale x 4 x float> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 8 x float> @test_vpairo_f32m4(<vscale x 8 x float> %passthru, <vscale x 8 x float> %a1, <vscale x 8 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.iXLen(
+    <vscale x 8 x float> %passthru,
+    <vscale x 8 x float> %a1,
+    <vscale x 8 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @test_vpairo_mask_f32m4(<vscale x 8 x float> %passthru, <vscale x 8 x float> %a1, <vscale x 8 x float> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32(
+    <vscale x 8 x float> %passthru,
+    <vscale x 8 x float> %a1,
+    <vscale x 8 x float> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 16 x float> @test_vpairo_f32m8(<vscale x 16 x float> %passthru, <vscale x 16 x float> %a1, <vscale x 16 x float> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.iXLen(
+    <vscale x 16 x float> %passthru,
+    <vscale x 16 x float> %a1,
+    <vscale x 16 x float> %a2,
+    iXLen %vl)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @test_vpairo_mask_f32m8(<vscale x 16 x float> %passthru, <vscale x 16 x float> %a1, <vscale x 16 x float> %a2, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re32.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32(
+    <vscale x 16 x float> %passthru,
+    <vscale x 16 x float> %a1,
+    <vscale x 16 x float> %a2,
+    <vscale x 16 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 16 x float> %r
+}
+
+; f64
+define <vscale x 1 x double> @test_vpairo_f64m1(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a1, <vscale x 1 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.iXLen(
+    <vscale x 1 x double> %passthru,
+    <vscale x 1 x double> %a1,
+    <vscale x 1 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @test_vpairo_mask_f64m1(<vscale x 1 x double> %passthru, <vscale x 1 x double> %a1, <vscale x 1 x double> %a2, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64(
+    <vscale x 1 x double> %passthru,
+    <vscale x 1 x double> %a1,
+    <vscale x 1 x double> %a2,
+    <vscale x 1 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 2 x double> @test_vpairo_f64m2(<vscale x 2 x double> %passthru, <vscale x 2 x double> %a1, <vscale x 2 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v10, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.iXLen(
+    <vscale x 2 x double> %passthru,
+    <vscale x 2 x double> %a1,
+    <vscale x 2 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @test_vpairo_mask_f64m2(<vscale x 2 x double> %passthru, <vscale x 2 x double> %a1, <vscale x 2 x double> %a2, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64(
+    <vscale x 2 x double> %passthru,
+    <vscale x 2 x double> %a1,
+    <vscale x 2 x double> %a2,
+    <vscale x 2 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 4 x double> @test_vpairo_f64m4(<vscale x 4 x double> %passthru, <vscale x 4 x double> %a1, <vscale x 4 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v12, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.iXLen(
+    <vscale x 4 x double> %passthru,
+    <vscale x 4 x double> %a1,
+    <vscale x 4 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @test_vpairo_mask_f64m4(<vscale x 4 x double> %passthru, <vscale x 4 x double> %a1, <vscale x 4 x double> %a2, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64(
+    <vscale x 4 x double> %passthru,
+    <vscale x 4 x double> %a1,
+    <vscale x 4 x double> %a2,
+    <vscale x 4 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 8 x double> @test_vpairo_f64m8(<vscale x 8 x double> %passthru, <vscale x 8 x double> %a1, <vscale x 8 x double> %a2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_f64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
+; CHECK-NEXT:    vpairo.vv v8, v16, v24
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.iXLen(
+    <vscale x 8 x double> %passthru,
+    <vscale x 8 x double> %a1,
+    <vscale x 8 x double> %a2,
+    iXLen %vl)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @test_vpairo_mask_f64m8(<vscale x 8 x double> %passthru, <vscale x 8 x double> %a1, <vscale x 8 x double> %a2, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vpairo_mask_f64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vl8re64.v v24, (a0)
+; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64(
+    <vscale x 8 x double> %passthru,
+    <vscale x 8 x double> %a1,
+    <vscale x 8 x double> %a2,
+    <vscale x 8 x i1> %m,
+    iXLen %vl,
+    iXLen 1)
+  ret <vscale x 8 x double> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
new file mode 100644
index 0000000000000..46b90036804ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
@@ -0,0 +1,823 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @test_vunzipe_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.iXLen(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @test_vunzipe_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vunzipe_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.iXLen(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vunzipe_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vunzipe_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.iXLen(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vunzipe_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vunzipe_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.iXLen(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vunzipe_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vunzipe_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.iXLen(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vunzipe_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vunzipe_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.iXLen(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vunzipe_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @test_vunzipe_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.iXLen(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @test_vunzipe_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vunzipe_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.iXLen(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vunzipe_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vunzipe_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.iXLen(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vunzipe_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vunzipe_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.iXLen(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vunzipe_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vunzipe_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.iXLen(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vunzipe_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i32> @test_vunzipe_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.iXLen(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @test_vunzipe_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vunzipe_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.iXLen(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vunzipe_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vunzipe_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.iXLen(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vunzipe_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vunzipe_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.iXLen(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vunzipe_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 1 x i64> @test_vunzipe_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.iXLen(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @test_vunzipe_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vunzipe_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.iXLen(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vunzipe_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vunzipe_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.iXLen(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vunzipe_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+; Floating-point types
+
+; f16
+define <vscale x 1 x half> @test_vunzipe_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.iXLen(
+    <vscale x 1 x half> %passthru,
+    <vscale x 2 x half> %src,
+    iXLen %vl)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @test_vunzipe_mask_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16(
+    <vscale x 1 x half> %passthru,
+    <vscale x 2 x half> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 2 x half> @test_vunzipe_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.iXLen(
+    <vscale x 2 x half> %passthru,
+    <vscale x 4 x half> %src,
+    iXLen %vl)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @test_vunzipe_mask_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16(
+    <vscale x 2 x half> %passthru,
+    <vscale x 4 x half> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 4 x half> @test_vunzipe_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.iXLen(
+    <vscale x 4 x half> %passthru,
+    <vscale x 8 x half> %src,
+    iXLen %vl)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @test_vunzipe_mask_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16(
+    <vscale x 4 x half> %passthru,
+    <vscale x 8 x half> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 8 x half> @test_vunzipe_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.iXLen(
+    <vscale x 8 x half> %passthru,
+    <vscale x 16 x half> %src,
+    iXLen %vl)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @test_vunzipe_mask_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16(
+    <vscale x 8 x half> %passthru,
+    <vscale x 16 x half> %src,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 16 x half> @test_vunzipe_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.iXLen(
+    <vscale x 16 x half> %passthru,
+    <vscale x 32 x half> %src,
+    iXLen %vl)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @test_vunzipe_mask_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16(
+    <vscale x 16 x half> %passthru,
+    <vscale x 32 x half> %src,
+    <vscale x 16 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x half> %r
+}
+
+; f32
+define <vscale x 1 x float> @test_vunzipe_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.iXLen(
+    <vscale x 1 x float> %passthru,
+    <vscale x 2 x float> %src,
+    iXLen %vl)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @test_vunzipe_mask_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32(
+    <vscale x 1 x float> %passthru,
+    <vscale x 2 x float> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 2 x float> @test_vunzipe_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.iXLen(
+    <vscale x 2 x float> %passthru,
+    <vscale x 4 x float> %src,
+    iXLen %vl)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @test_vunzipe_mask_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32(
+    <vscale x 2 x float> %passthru,
+    <vscale x 4 x float> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 4 x float> @test_vunzipe_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.iXLen(
+    <vscale x 4 x float> %passthru,
+    <vscale x 8 x float> %src,
+    iXLen %vl)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @test_vunzipe_mask_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32(
+    <vscale x 4 x float> %passthru,
+    <vscale x 8 x float> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 8 x float> @test_vunzipe_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.iXLen(
+    <vscale x 8 x float> %passthru,
+    <vscale x 16 x float> %src,
+    iXLen %vl)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @test_vunzipe_mask_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32(
+    <vscale x 8 x float> %passthru,
+    <vscale x 16 x float> %src,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x float> %r
+}
+
+; f64
+define <vscale x 1 x double> @test_vunzipe_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.iXLen(
+    <vscale x 1 x double> %passthru,
+    <vscale x 2 x double> %src,
+    iXLen %vl)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @test_vunzipe_mask_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %passthru,
+    <vscale x 2 x double> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 2 x double> @test_vunzipe_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.iXLen(
+    <vscale x 2 x double> %passthru,
+    <vscale x 4 x double> %src,
+    iXLen %vl)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @test_vunzipe_mask_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64(
+    <vscale x 2 x double> %passthru,
+    <vscale x 4 x double> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 4 x double> @test_vunzipe_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vunzipe.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.iXLen(
+    <vscale x 4 x double> %passthru,
+    <vscale x 8 x double> %src,
+    iXLen %vl)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @test_vunzipe_mask_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipe_mask_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64(
+    <vscale x 4 x double> %passthru,
+    <vscale x 8 x double> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x double> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
new file mode 100644
index 0000000000000..07faa0f12df27
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
@@ -0,0 +1,823 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 1 x i8> @test_vunzipo_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.iXLen(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 1 x i8> @test_vunzipo_mask_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8mf8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8(
+    <vscale x 1 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vunzipo_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.iXLen(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vunzipo_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vunzipo_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.iXLen(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vunzipo_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vunzipo_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.iXLen(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vunzipo_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vunzipo_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.iXLen(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vunzipo_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vunzipo_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.iXLen(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    iXLen %vl)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vunzipo_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 64 x i8> %arg1,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 1 x i16> @test_vunzipo_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.iXLen(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 1 x i16> @test_vunzipo_mask_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16(
+    <vscale x 1 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vunzipo_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.iXLen(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vunzipo_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vunzipo_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.iXLen(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vunzipo_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vunzipo_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.iXLen(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vunzipo_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vunzipo_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.iXLen(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    iXLen %vl)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vunzipo_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 32 x i16> %arg1,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 1 x i32> @test_vunzipo_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.iXLen(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 1 x i32> @test_vunzipo_mask_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32(
+    <vscale x 1 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vunzipo_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.iXLen(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vunzipo_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vunzipo_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.iXLen(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vunzipo_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vunzipo_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.iXLen(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    iXLen %vl)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vunzipo_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 16 x i32> %arg1,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 1 x i64> @test_vunzipo_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.iXLen(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 1 x i64> @test_vunzipo_mask_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64(
+    <vscale x 1 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vunzipo_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.iXLen(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vunzipo_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vunzipo_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.iXLen(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    iXLen %vl)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vunzipo_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 8 x i64> %arg1,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+; Floating-point types
+
+; f16
+define <vscale x 1 x half> @test_vunzipo_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.iXLen(
+    <vscale x 1 x half> %passthru,
+    <vscale x 2 x half> %src,
+    iXLen %vl)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 1 x half> @test_vunzipo_mask_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16(
+    <vscale x 1 x half> %passthru,
+    <vscale x 2 x half> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x half> %r
+}
+
+define <vscale x 2 x half> @test_vunzipo_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.iXLen(
+    <vscale x 2 x half> %passthru,
+    <vscale x 4 x half> %src,
+    iXLen %vl)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @test_vunzipo_mask_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16(
+    <vscale x 2 x half> %passthru,
+    <vscale x 4 x half> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 4 x half> @test_vunzipo_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.iXLen(
+    <vscale x 4 x half> %passthru,
+    <vscale x 8 x half> %src,
+    iXLen %vl)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @test_vunzipo_mask_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16(
+    <vscale x 4 x half> %passthru,
+    <vscale x 8 x half> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 8 x half> @test_vunzipo_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.iXLen(
+    <vscale x 8 x half> %passthru,
+    <vscale x 16 x half> %src,
+    iXLen %vl)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @test_vunzipo_mask_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16(
+    <vscale x 8 x half> %passthru,
+    <vscale x 16 x half> %src,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 16 x half> @test_vunzipo_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.iXLen(
+    <vscale x 16 x half> %passthru,
+    <vscale x 32 x half> %src,
+    iXLen %vl)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @test_vunzipo_mask_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16(
+    <vscale x 16 x half> %passthru,
+    <vscale x 32 x half> %src,
+    <vscale x 16 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x half> %r
+}
+
+; f32
+define <vscale x 1 x float> @test_vunzipo_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v9
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.iXLen(
+    <vscale x 1 x float> %passthru,
+    <vscale x 2 x float> %src,
+    iXLen %vl)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 1 x float> @test_vunzipo_mask_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32(
+    <vscale x 1 x float> %passthru,
+    <vscale x 2 x float> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x float> %r
+}
+
+define <vscale x 2 x float> @test_vunzipo_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.iXLen(
+    <vscale x 2 x float> %passthru,
+    <vscale x 4 x float> %src,
+    iXLen %vl)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @test_vunzipo_mask_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32(
+    <vscale x 2 x float> %passthru,
+    <vscale x 4 x float> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 4 x float> @test_vunzipo_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.iXLen(
+    <vscale x 4 x float> %passthru,
+    <vscale x 8 x float> %src,
+    iXLen %vl)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @test_vunzipo_mask_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32(
+    <vscale x 4 x float> %passthru,
+    <vscale x 8 x float> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 8 x float> @test_vunzipo_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.iXLen(
+    <vscale x 8 x float> %passthru,
+    <vscale x 16 x float> %src,
+    iXLen %vl)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @test_vunzipo_mask_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32(
+    <vscale x 8 x float> %passthru,
+    <vscale x 16 x float> %src,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x float> %r
+}
+
+; f64
+define <vscale x 1 x double> @test_vunzipo_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.iXLen(
+    <vscale x 1 x double> %passthru,
+    <vscale x 2 x double> %src,
+    iXLen %vl)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 1 x double> @test_vunzipo_mask_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64(
+    <vscale x 1 x double> %passthru,
+    <vscale x 2 x double> %src,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 1 x double> %r
+}
+
+define <vscale x 2 x double> @test_vunzipo_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v12
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.iXLen(
+    <vscale x 2 x double> %passthru,
+    <vscale x 4 x double> %src,
+    iXLen %vl)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @test_vunzipo_mask_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64(
+    <vscale x 2 x double> %passthru,
+    <vscale x 4 x double> %src,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 4 x double> @test_vunzipo_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vunzipo.v v8, v16
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.iXLen(
+    <vscale x 4 x double> %passthru,
+    <vscale x 8 x double> %src,
+    iXLen %vl)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @test_vunzipo_mask_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vunzipo_mask_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64(
+    <vscale x 4 x double> %passthru,
+    <vscale x 8 x double> %src,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x double> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzip.ll b/llvm/test/CodeGen/RISCV/rvv/vzip.ll
new file mode 100644
index 0000000000000..afba044383abc
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vzip.ll
@@ -0,0 +1,883 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: sed 's/iXLen/i32/g' %s | llc -mtriple=riscv32 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: sed 's/iXLen/i64/g' %s | llc -mtriple=riscv64 -mattr=+v,+zvfh,+experimental-zvzip \
+; RUN:   -verify-machineinstrs | FileCheck %s --check-prefixes=CHECK,RV64
+
+define <vscale x 2 x i8> @test_vzip_vv_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.iXLen(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 2 x i8> @test_vzip_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 1 x i8> %arg1, <vscale x 1 x i8> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8(
+    <vscale x 2 x i8> %passthru,
+    <vscale x 1 x i8> %arg1,
+    <vscale x 1 x i8> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vzip_vv_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.iXLen(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 4 x i8> @test_vzip_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 2 x i8> %arg1, <vscale x 2 x i8> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8(
+    <vscale x 4 x i8> %passthru,
+    <vscale x 2 x i8> %arg1,
+    <vscale x 2 x i8> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vzip_vv_i8m1(<vscale x 8 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.iXLen(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 8 x i8> @test_vzip_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <vscale x 4 x i8> %arg1, <vscale x 4 x i8> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8(
+    <vscale x 8 x i8> %passthru,
+    <vscale x 4 x i8> %arg1,
+    <vscale x 4 x i8> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vzip_vv_i8m2(<vscale x 16 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.iXLen(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 16 x i8> @test_vzip_vv_mask_i8m2(<vscale x 16 x i8> %passthru, <vscale x 8 x i8> %arg1, <vscale x 8 x i8> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8(
+    <vscale x 16 x i8> %passthru,
+    <vscale x 8 x i8> %arg1,
+    <vscale x 8 x i8> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vzip_vv_i8m4(<vscale x 32 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.iXLen(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 32 x i8> @test_vzip_vv_mask_i8m4(<vscale x 32 x i8> %passthru, <vscale x 16 x i8> %arg1, <vscale x 16 x i8> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8(
+    <vscale x 32 x i8> %passthru,
+    <vscale x 16 x i8> %arg1,
+    <vscale x 16 x i8> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vzip_vv_i8m8(<vscale x 64 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.iXLen(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    iXLen %vl)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 64 x i8> @test_vzip_vv_mask_i8m8(<vscale x 64 x i8> %passthru, <vscale x 32 x i8> %arg1, <vscale x 32 x i8> %arg2, <vscale x 32 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i8m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8(
+    <vscale x 64 x i8> %passthru,
+    <vscale x 32 x i8> %arg1,
+    <vscale x 32 x i8> %arg2,
+    <vscale x 32 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 64 x i8> %a
+}
+
+define <vscale x 2 x i16> @test_vzip_vv_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.iXLen(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 2 x i16> @test_vzip_vv_mask_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 1 x i16> %arg1, <vscale x 1 x i16> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16(
+    <vscale x 2 x i16> %passthru,
+    <vscale x 1 x i16> %arg1,
+    <vscale x 1 x i16> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vzip_vv_i16m1(<vscale x 4 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.iXLen(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 4 x i16> @test_vzip_vv_mask_i16m1(<vscale x 4 x i16> %passthru, <vscale x 2 x i16> %arg1, <vscale x 2 x i16> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16(
+    <vscale x 4 x i16> %passthru,
+    <vscale x 2 x i16> %arg1,
+    <vscale x 2 x i16> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vzip_vv_i16m2(<vscale x 8 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.iXLen(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 8 x i16> @test_vzip_vv_mask_i16m2(<vscale x 8 x i16> %passthru, <vscale x 4 x i16> %arg1, <vscale x 4 x i16> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16(
+    <vscale x 8 x i16> %passthru,
+    <vscale x 4 x i16> %arg1,
+    <vscale x 4 x i16> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vzip_vv_i16m4(<vscale x 16 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.iXLen(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 16 x i16> @test_vzip_vv_mask_i16m4(<vscale x 16 x i16> %passthru, <vscale x 8 x i16> %arg1, <vscale x 8 x i16> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16(
+    <vscale x 16 x i16> %passthru,
+    <vscale x 8 x i16> %arg1,
+    <vscale x 8 x i16> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vzip_vv_i16m8(<vscale x 32 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.iXLen(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    iXLen %vl)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 32 x i16> @test_vzip_vv_mask_i16m8(<vscale x 32 x i16> %passthru, <vscale x 16 x i16> %arg1, <vscale x 16 x i16> %arg2, <vscale x 16 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i16m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16(
+    <vscale x 32 x i16> %passthru,
+    <vscale x 16 x i16> %arg1,
+    <vscale x 16 x i16> %arg2,
+    <vscale x 16 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x i16> %a
+}
+
+define <vscale x 2 x i32> @test_vzip_vv_i32m1(<vscale x 2 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.iXLen(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 2 x i32> @test_vzip_vv_mask_i32m1(<vscale x 2 x i32> %passthru, <vscale x 1 x i32> %arg1, <vscale x 1 x i32> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32(
+    <vscale x 2 x i32> %passthru,
+    <vscale x 1 x i32> %arg1,
+    <vscale x 1 x i32> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vzip_vv_i32m2(<vscale x 4 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.iXLen(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 4 x i32> @test_vzip_vv_mask_i32m2(<vscale x 4 x i32> %passthru, <vscale x 2 x i32> %arg1, <vscale x 2 x i32> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32(
+    <vscale x 4 x i32> %passthru,
+    <vscale x 2 x i32> %arg1,
+    <vscale x 2 x i32> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vzip_vv_i32m4(<vscale x 8 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.iXLen(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 8 x i32> @test_vzip_vv_mask_i32m4(<vscale x 8 x i32> %passthru, <vscale x 4 x i32> %arg1, <vscale x 4 x i32> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32(
+    <vscale x 8 x i32> %passthru,
+    <vscale x 4 x i32> %arg1,
+    <vscale x 4 x i32> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vzip_vv_i32m8(<vscale x 16 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.iXLen(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    iXLen %vl)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 16 x i32> @test_vzip_vv_mask_i32m8(<vscale x 16 x i32> %passthru, <vscale x 8 x i32> %arg1, <vscale x 8 x i32> %arg2, <vscale x 8 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i32m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32(
+    <vscale x 16 x i32> %passthru,
+    <vscale x 8 x i32> %arg1,
+    <vscale x 8 x i32> %arg2,
+    <vscale x 8 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x i32> %a
+}
+
+define <vscale x 2 x i64> @test_vzip_vv_i64m2(<vscale x 2 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.iXLen(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 2 x i64> @test_vzip_vv_mask_i64m2(<vscale x 2 x i64> %passthru, <vscale x 1 x i64> %arg1, <vscale x 1 x i64> %arg2, <vscale x 1 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64(
+    <vscale x 2 x i64> %passthru,
+    <vscale x 1 x i64> %arg1,
+    <vscale x 1 x i64> %arg2,
+    <vscale x 1 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vzip_vv_i64m4(<vscale x 4 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.iXLen(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 4 x i64> @test_vzip_vv_mask_i64m4(<vscale x 4 x i64> %passthru, <vscale x 2 x i64> %arg1, <vscale x 2 x i64> %arg2, <vscale x 2 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64(
+    <vscale x 4 x i64> %passthru,
+    <vscale x 2 x i64> %arg1,
+    <vscale x 2 x i64> %arg2,
+    <vscale x 2 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vzip_vv_i64m8(<vscale x 8 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.iXLen(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    iXLen %vl)
+  ret <vscale x 8 x i64> %a
+}
+
+define <vscale x 8 x i64> @test_vzip_vv_mask_i64m8(<vscale x 8 x i64> %passthru, <vscale x 4 x i64> %arg1, <vscale x 4 x i64> %arg2, <vscale x 4 x i1> %mask, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_i64m8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %a = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64(
+    <vscale x 8 x i64> %passthru,
+    <vscale x 4 x i64> %arg1,
+    <vscale x 4 x i64> %arg2,
+    <vscale x 4 x i1> %mask,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x i64> %a
+}
+
+; Floating-point types
+
+; f16
+define <vscale x 2 x half> @test_vzip_vv_f16mf4(<vscale x 2 x half> %passthru, <vscale x 1 x half> %a, <vscale x 1 x half> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.iXLen(
+    <vscale x 2 x half> %passthru,
+    <vscale x 1 x half> %a,
+    <vscale x 1 x half> %b,
+    iXLen %vl)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 2 x half> @test_vzip_vv_mask_f16mf4(<vscale x 2 x half> %passthru, <vscale x 1 x half> %a, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f16mf4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16(
+    <vscale x 2 x half> %passthru,
+    <vscale x 1 x half> %a,
+    <vscale x 1 x half> %b,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x half> %r
+}
+
+define <vscale x 4 x half> @test_vzip_vv_f16mf2(<vscale x 4 x half> %passthru, <vscale x 2 x half> %a, <vscale x 2 x half> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.iXLen(
+    <vscale x 4 x half> %passthru,
+    <vscale x 2 x half> %a,
+    <vscale x 2 x half> %b,
+    iXLen %vl)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 4 x half> @test_vzip_vv_mask_f16mf2(<vscale x 4 x half> %passthru, <vscale x 2 x half> %a, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f16mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16(
+    <vscale x 4 x half> %passthru,
+    <vscale x 2 x half> %a,
+    <vscale x 2 x half> %b,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x half> %r
+}
+
+define <vscale x 8 x half> @test_vzip_vv_f16m1(<vscale x 8 x half> %passthru, <vscale x 4 x half> %a, <vscale x 4 x half> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.iXLen(
+    <vscale x 8 x half> %passthru,
+    <vscale x 4 x half> %a,
+    <vscale x 4 x half> %b,
+    iXLen %vl)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 8 x half> @test_vzip_vv_mask_f16m1(<vscale x 8 x half> %passthru, <vscale x 4 x half> %a, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f16m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16(
+    <vscale x 8 x half> %passthru,
+    <vscale x 4 x half> %a,
+    <vscale x 4 x half> %b,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x half> %r
+}
+
+define <vscale x 16 x half> @test_vzip_vv_f16m2(<vscale x 16 x half> %passthru, <vscale x 8 x half> %a, <vscale x 8 x half> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.iXLen(
+    <vscale x 16 x half> %passthru,
+    <vscale x 8 x half> %a,
+    <vscale x 8 x half> %b,
+    iXLen %vl)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 16 x half> @test_vzip_vv_mask_f16m2(<vscale x 16 x half> %passthru, <vscale x 8 x half> %a, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f16m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16(
+    <vscale x 16 x half> %passthru,
+    <vscale x 8 x half> %a,
+    <vscale x 8 x half> %b,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x half> %r
+}
+
+define <vscale x 32 x half> @test_vzip_vv_f16m4(<vscale x 32 x half> %passthru, <vscale x 16 x half> %a, <vscale x 16 x half> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.iXLen(
+    <vscale x 32 x half> %passthru,
+    <vscale x 16 x half> %a,
+    <vscale x 16 x half> %b,
+    iXLen %vl)
+  ret <vscale x 32 x half> %r
+}
+
+define <vscale x 32 x half> @test_vzip_vv_mask_f16m4(<vscale x 32 x half> %passthru, <vscale x 16 x half> %a, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f16m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16(
+    <vscale x 32 x half> %passthru,
+    <vscale x 16 x half> %a,
+    <vscale x 16 x half> %b,
+    <vscale x 16 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 32 x half> %r
+}
+
+; f32
+define <vscale x 2 x float> @test_vzip_vv_f32mf2(<vscale x 2 x float> %passthru, <vscale x 1 x float> %a, <vscale x 1 x float> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v9, v10
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.iXLen(
+    <vscale x 2 x float> %passthru,
+    <vscale x 1 x float> %a,
+    <vscale x 1 x float> %b,
+    iXLen %vl)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 2 x float> @test_vzip_vv_mask_f32mf2(<vscale x 2 x float> %passthru, <vscale x 1 x float> %a, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f32mf2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32(
+    <vscale x 2 x float> %passthru,
+    <vscale x 1 x float> %a,
+    <vscale x 1 x float> %b,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x float> %r
+}
+
+define <vscale x 4 x float> @test_vzip_vv_f32m1(<vscale x 4 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x float> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.iXLen(
+    <vscale x 4 x float> %passthru,
+    <vscale x 2 x float> %a,
+    <vscale x 2 x float> %b,
+    iXLen %vl)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 4 x float> @test_vzip_vv_mask_f32m1(<vscale x 4 x float> %passthru, <vscale x 2 x float> %a, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f32m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32(
+    <vscale x 4 x float> %passthru,
+    <vscale x 2 x float> %a,
+    <vscale x 2 x float> %b,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x float> %r
+}
+
+define <vscale x 8 x float> @test_vzip_vv_f32m2(<vscale x 8 x float> %passthru, <vscale x 4 x float> %a, <vscale x 4 x float> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.iXLen(
+    <vscale x 8 x float> %passthru,
+    <vscale x 4 x float> %a,
+    <vscale x 4 x float> %b,
+    iXLen %vl)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 8 x float> @test_vzip_vv_mask_f32m2(<vscale x 8 x float> %passthru, <vscale x 4 x float> %a, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f32m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32(
+    <vscale x 8 x float> %passthru,
+    <vscale x 4 x float> %a,
+    <vscale x 4 x float> %b,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x float> %r
+}
+
+define <vscale x 16 x float> @test_vzip_vv_f32m4(<vscale x 16 x float> %passthru, <vscale x 8 x float> %a, <vscale x 8 x float> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.iXLen(
+    <vscale x 16 x float> %passthru,
+    <vscale x 8 x float> %a,
+    <vscale x 8 x float> %b,
+    iXLen %vl)
+  ret <vscale x 16 x float> %r
+}
+
+define <vscale x 16 x float> @test_vzip_vv_mask_f32m4(<vscale x 16 x float> %passthru, <vscale x 8 x float> %a, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f32m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32(
+    <vscale x 16 x float> %passthru,
+    <vscale x 8 x float> %a,
+    <vscale x 8 x float> %b,
+    <vscale x 8 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 16 x float> %r
+}
+
+; f64
+define <vscale x 2 x double> @test_vzip_vv_f64m1(<vscale x 2 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v10, v11
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.iXLen(
+    <vscale x 2 x double> %passthru,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %b,
+    iXLen %vl)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 2 x double> @test_vzip_vv_mask_f64m1(<vscale x 2 x double> %passthru, <vscale x 1 x double> %a, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f64m1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64(
+    <vscale x 2 x double> %passthru,
+    <vscale x 1 x double> %a,
+    <vscale x 1 x double> %b,
+    <vscale x 1 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 2 x double> %r
+}
+
+define <vscale x 4 x double> @test_vzip_vv_f64m2(<vscale x 4 x double> %passthru, <vscale x 2 x double> %a, <vscale x 2 x double> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v12, v14
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.iXLen(
+    <vscale x 4 x double> %passthru,
+    <vscale x 2 x double> %a,
+    <vscale x 2 x double> %b,
+    iXLen %vl)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 4 x double> @test_vzip_vv_mask_f64m2(<vscale x 4 x double> %passthru, <vscale x 2 x double> %a, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f64m2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64(
+    <vscale x 4 x double> %passthru,
+    <vscale x 2 x double> %a,
+    <vscale x 2 x double> %b,
+    <vscale x 2 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 4 x double> %r
+}
+
+define <vscale x 8 x double> @test_vzip_vv_f64m4(<vscale x 8 x double> %passthru, <vscale x 4 x double> %a, <vscale x 4 x double> %b, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
+; CHECK-NEXT:    vzip.vv v8, v16, v20
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.iXLen(
+    <vscale x 8 x double> %passthru,
+    <vscale x 4 x double> %a,
+    <vscale x 4 x double> %b,
+    iXLen %vl)
+  ret <vscale x 8 x double> %r
+}
+
+define <vscale x 8 x double> @test_vzip_vv_mask_f64m4(<vscale x 8 x double> %passthru, <vscale x 4 x double> %a, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, iXLen %vl) nounwind {
+; CHECK-LABEL: test_vzip_vv_mask_f64m4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
+; CHECK-NEXT:    ret
+  %r = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64(
+    <vscale x 8 x double> %passthru,
+    <vscale x 4 x double> %a,
+    <vscale x 4 x double> %b,
+    <vscale x 4 x i1> %m,
+    iXLen %vl, iXLen 1)
+  ret <vscale x 8 x double> %r
+}
+;; NOTE: These prefixes are unused and the list is autogenerated. Do not add tests below this line:
+; RV32: {{.*}}
+; RV64: {{.*}}

>From 7aa0a4e495d3780637c19549a4ec60e81531cd7c Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Sat, 28 Mar 2026 01:16:37 +0800
Subject: [PATCH 2/5] Remove BF16 (we will add it in the future)

---
 llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td | 7 -------
 1 file changed, 7 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
index bc838fbfd95a9..d3e84a3f9a9f7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
@@ -70,13 +70,6 @@ defset list<VTypeInfoToWide> AllZvzipVectors = {
   def : VTypeInfoToWide<VF64M1,  VF64M2>;
   def : VTypeInfoToWide<VF64M2,  VF64M4>;
   def : VTypeInfoToWide<VF64M4,  VF64M8>;
-
-  // BF16 (16-bit)
-  def : VTypeInfoToWide<VBF16MF4, VBF16MF2>;
-  def : VTypeInfoToWide<VBF16MF2, VBF16M1>;
-  def : VTypeInfoToWide<VBF16M1,  VBF16M2>;
-  def : VTypeInfoToWide<VBF16M2,  VBF16M4>;
-  def : VTypeInfoToWide<VBF16M4,  VBF16M8>;
 }
 
 multiclass VPseudoVZIP {

>From dc3f7f1c693705a30009beecb22c738345904776 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Mon, 30 Mar 2026 21:16:31 +0800
Subject: [PATCH 3/5] Use VPatUnaryNoMask/VPatUnaryMask

---
 llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td | 28 ++-------
 llvm/test/CodeGen/RISCV/rvv/vunzipe.ll       | 60 ++++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vunzipo.ll       | 60 ++++++++++----------
 3 files changed, 66 insertions(+), 82 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
index d3e84a3f9a9f7..696b8ec01c8d7 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
@@ -107,28 +107,12 @@ multiclass VPatVUNZIPIntrinsic<string intrinsic_name, string instruction_name> {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
     let Predicates = !listconcat(GetVTypePredicates<wti>.Predicates, [HasStdExtZvzip]) in {
-      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic_name)
-                              (vti.Vector vti.RegClass:$passthru),
-                              (wti.Vector wti.RegClass:$rs2),
-                              VLOpFrag)),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX)
-                  vti.RegClass:$passthru,
-                  wti.RegClass:$rs2,
-                  GPR:$vl,
-                  vti.Log2SEW,
-                  TAIL_AGNOSTIC)>;
-      def : Pat<(vti.Vector (!cast<Intrinsic>(intrinsic_name # "_mask")
-                              (vti.Vector vti.RegClass:$passthru),
-                              (wti.Vector wti.RegClass:$rs2),
-                              (vti.Mask VMV0:$vm),
-                              VLOpFrag, (XLenVT timm:$policy))),
-                (!cast<Instruction>(instruction_name#"_V_"#vti.LMul.MX#"_MASK")
-                  vti.RegClass:$passthru,
-                  wti.RegClass:$rs2,
-                  (vti.Mask VMV0:$vm),
-                  GPR:$vl,
-                  vti.Log2SEW,
-                  timm:$policy)>;
+      def : VPatUnaryNoMask<intrinsic_name, instruction_name, "V",
+                            vti.Vector, wti.Vector, vti.Log2SEW,
+                            vti.LMul, vti.RegClass, wti.RegClass>;
+      def : VPatUnaryMask<intrinsic_name, instruction_name, "V",
+                          vti.Vector, wti.Vector, vti.Mask, vti.Log2SEW,
+                          vti.LMul, vti.RegClass, wti.RegClass>;
     }
   }
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
index 46b90036804ee..496c21c0676c6 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
@@ -7,7 +7,7 @@
 define <vscale x 1 x i8> @test_vunzipe_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.iXLen(
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @test_vunzipe_mask_i8mf8(<vscale x 1 x i8> %passthru, <
 define <vscale x 2 x i8> @test_vunzipe_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.iXLen(
@@ -61,7 +61,7 @@ define <vscale x 2 x i8> @test_vunzipe_mask_i8mf4(<vscale x 2 x i8> %passthru, <
 define <vscale x 4 x i8> @test_vunzipe_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.iXLen(
@@ -88,7 +88,7 @@ define <vscale x 4 x i8> @test_vunzipe_mask_i8mf2(<vscale x 4 x i8> %passthru, <
 define <vscale x 8 x i8> @test_vunzipe_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.iXLen(
@@ -115,7 +115,7 @@ define <vscale x 8 x i8> @test_vunzipe_mask_i8m1(<vscale x 8 x i8> %passthru, <v
 define <vscale x 16 x i8> @test_vunzipe_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.iXLen(
@@ -142,7 +142,7 @@ define <vscale x 16 x i8> @test_vunzipe_mask_i8m2(<vscale x 16 x i8> %passthru,
 define <vscale x 32 x i8> @test_vunzipe_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i8m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.iXLen(
@@ -169,7 +169,7 @@ define <vscale x 32 x i8> @test_vunzipe_mask_i8m4(<vscale x 32 x i8> %passthru,
 define <vscale x 1 x i16> @test_vunzipe_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.iXLen(
@@ -196,7 +196,7 @@ define <vscale x 1 x i16> @test_vunzipe_mask_i16mf4(<vscale x 1 x i16> %passthru
 define <vscale x 2 x i16> @test_vunzipe_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.iXLen(
@@ -223,7 +223,7 @@ define <vscale x 2 x i16> @test_vunzipe_mask_i16mf2(<vscale x 2 x i16> %passthru
 define <vscale x 4 x i16> @test_vunzipe_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.iXLen(
@@ -250,7 +250,7 @@ define <vscale x 4 x i16> @test_vunzipe_mask_i16m1(<vscale x 4 x i16> %passthru,
 define <vscale x 8 x i16> @test_vunzipe_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.iXLen(
@@ -277,7 +277,7 @@ define <vscale x 8 x i16> @test_vunzipe_mask_i16m2(<vscale x 8 x i16> %passthru,
 define <vscale x 16 x i16> @test_vunzipe_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.iXLen(
@@ -304,7 +304,7 @@ define <vscale x 16 x i16> @test_vunzipe_mask_i16m4(<vscale x 16 x i16> %passthr
 define <vscale x 1 x i32> @test_vunzipe_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.iXLen(
@@ -331,7 +331,7 @@ define <vscale x 1 x i32> @test_vunzipe_mask_i32mf2(<vscale x 1 x i32> %passthru
 define <vscale x 2 x i32> @test_vunzipe_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.iXLen(
@@ -358,7 +358,7 @@ define <vscale x 2 x i32> @test_vunzipe_mask_i32m1(<vscale x 2 x i32> %passthru,
 define <vscale x 4 x i32> @test_vunzipe_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.iXLen(
@@ -385,7 +385,7 @@ define <vscale x 4 x i32> @test_vunzipe_mask_i32m2(<vscale x 4 x i32> %passthru,
 define <vscale x 8 x i32> @test_vunzipe_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.iXLen(
@@ -412,7 +412,7 @@ define <vscale x 8 x i32> @test_vunzipe_mask_i32m4(<vscale x 8 x i32> %passthru,
 define <vscale x 1 x i64> @test_vunzipe_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.iXLen(
@@ -439,7 +439,7 @@ define <vscale x 1 x i64> @test_vunzipe_mask_i64m1(<vscale x 1 x i64> %passthru,
 define <vscale x 2 x i64> @test_vunzipe_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.iXLen(
@@ -466,7 +466,7 @@ define <vscale x 2 x i64> @test_vunzipe_mask_i64m2(<vscale x 2 x i64> %passthru,
 define <vscale x 4 x i64> @test_vunzipe_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_i64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.iXLen(
@@ -496,7 +496,7 @@ define <vscale x 4 x i64> @test_vunzipe_mask_i64m4(<vscale x 4 x i64> %passthru,
 define <vscale x 1 x half> @test_vunzipe_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.iXLen(
@@ -523,7 +523,7 @@ define <vscale x 1 x half> @test_vunzipe_mask_f16mf4(<vscale x 1 x half> %passth
 define <vscale x 2 x half> @test_vunzipe_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.iXLen(
@@ -550,7 +550,7 @@ define <vscale x 2 x half> @test_vunzipe_mask_f16mf2(<vscale x 2 x half> %passth
 define <vscale x 4 x half> @test_vunzipe_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.iXLen(
@@ -577,7 +577,7 @@ define <vscale x 4 x half> @test_vunzipe_mask_f16m1(<vscale x 4 x half> %passthr
 define <vscale x 8 x half> @test_vunzipe_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.iXLen(
@@ -604,7 +604,7 @@ define <vscale x 8 x half> @test_vunzipe_mask_f16m2(<vscale x 8 x half> %passthr
 define <vscale x 16 x half> @test_vunzipe_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.iXLen(
@@ -632,7 +632,7 @@ define <vscale x 16 x half> @test_vunzipe_mask_f16m4(<vscale x 16 x half> %passt
 define <vscale x 1 x float> @test_vunzipe_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.iXLen(
@@ -659,7 +659,7 @@ define <vscale x 1 x float> @test_vunzipe_mask_f32mf2(<vscale x 1 x float> %pass
 define <vscale x 2 x float> @test_vunzipe_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.iXLen(
@@ -686,7 +686,7 @@ define <vscale x 2 x float> @test_vunzipe_mask_f32m1(<vscale x 2 x float> %passt
 define <vscale x 4 x float> @test_vunzipe_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.iXLen(
@@ -713,7 +713,7 @@ define <vscale x 4 x float> @test_vunzipe_mask_f32m2(<vscale x 4 x float> %passt
 define <vscale x 8 x float> @test_vunzipe_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.iXLen(
@@ -741,7 +741,7 @@ define <vscale x 8 x float> @test_vunzipe_mask_f32m4(<vscale x 8 x float> %passt
 define <vscale x 1 x double> @test_vunzipe_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.iXLen(
@@ -768,7 +768,7 @@ define <vscale x 1 x double> @test_vunzipe_mask_f64m1(<vscale x 1 x double> %pas
 define <vscale x 2 x double> @test_vunzipe_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.iXLen(
@@ -795,7 +795,7 @@ define <vscale x 2 x double> @test_vunzipe_mask_f64m2(<vscale x 2 x double> %pas
 define <vscale x 4 x double> @test_vunzipe_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipe_f64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.iXLen(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
index 07faa0f12df27..4d3aa18304cdc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
@@ -7,7 +7,7 @@
 define <vscale x 1 x i8> @test_vunzipo_i8mf8(<vscale x 1 x i8> %passthru, <vscale x 2 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8mf8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.iXLen(
@@ -34,7 +34,7 @@ define <vscale x 1 x i8> @test_vunzipo_mask_i8mf8(<vscale x 1 x i8> %passthru, <
 define <vscale x 2 x i8> @test_vunzipo_i8mf4(<vscale x 2 x i8> %passthru, <vscale x 4 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.iXLen(
@@ -61,7 +61,7 @@ define <vscale x 2 x i8> @test_vunzipo_mask_i8mf4(<vscale x 2 x i8> %passthru, <
 define <vscale x 4 x i8> @test_vunzipo_i8mf2(<vscale x 4 x i8> %passthru, <vscale x 8 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.iXLen(
@@ -88,7 +88,7 @@ define <vscale x 4 x i8> @test_vunzipo_mask_i8mf2(<vscale x 4 x i8> %passthru, <
 define <vscale x 8 x i8> @test_vunzipo_i8m1(<vscale x 8 x i8> %passthru, <vscale x 16 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.iXLen(
@@ -115,7 +115,7 @@ define <vscale x 8 x i8> @test_vunzipo_mask_i8m1(<vscale x 8 x i8> %passthru, <v
 define <vscale x 16 x i8> @test_vunzipo_i8m2(<vscale x 16 x i8> %passthru, <vscale x 32 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.iXLen(
@@ -142,7 +142,7 @@ define <vscale x 16 x i8> @test_vunzipo_mask_i8m2(<vscale x 16 x i8> %passthru,
 define <vscale x 32 x i8> @test_vunzipo_i8m4(<vscale x 32 x i8> %passthru, <vscale x 64 x i8> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i8m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.iXLen(
@@ -169,7 +169,7 @@ define <vscale x 32 x i8> @test_vunzipo_mask_i8m4(<vscale x 32 x i8> %passthru,
 define <vscale x 1 x i16> @test_vunzipo_i16mf4(<vscale x 1 x i16> %passthru, <vscale x 2 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.iXLen(
@@ -196,7 +196,7 @@ define <vscale x 1 x i16> @test_vunzipo_mask_i16mf4(<vscale x 1 x i16> %passthru
 define <vscale x 2 x i16> @test_vunzipo_i16mf2(<vscale x 2 x i16> %passthru, <vscale x 4 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.iXLen(
@@ -223,7 +223,7 @@ define <vscale x 2 x i16> @test_vunzipo_mask_i16mf2(<vscale x 2 x i16> %passthru
 define <vscale x 4 x i16> @test_vunzipo_i16m1(<vscale x 4 x i16> %passthru, <vscale x 8 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.iXLen(
@@ -250,7 +250,7 @@ define <vscale x 4 x i16> @test_vunzipo_mask_i16m1(<vscale x 4 x i16> %passthru,
 define <vscale x 8 x i16> @test_vunzipo_i16m2(<vscale x 8 x i16> %passthru, <vscale x 16 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.iXLen(
@@ -277,7 +277,7 @@ define <vscale x 8 x i16> @test_vunzipo_mask_i16m2(<vscale x 8 x i16> %passthru,
 define <vscale x 16 x i16> @test_vunzipo_i16m4(<vscale x 16 x i16> %passthru, <vscale x 32 x i16> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.iXLen(
@@ -304,7 +304,7 @@ define <vscale x 16 x i16> @test_vunzipo_mask_i16m4(<vscale x 16 x i16> %passthr
 define <vscale x 1 x i32> @test_vunzipo_i32mf2(<vscale x 1 x i32> %passthru, <vscale x 2 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.iXLen(
@@ -331,7 +331,7 @@ define <vscale x 1 x i32> @test_vunzipo_mask_i32mf2(<vscale x 1 x i32> %passthru
 define <vscale x 2 x i32> @test_vunzipo_i32m1(<vscale x 2 x i32> %passthru, <vscale x 4 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.iXLen(
@@ -358,7 +358,7 @@ define <vscale x 2 x i32> @test_vunzipo_mask_i32m1(<vscale x 2 x i32> %passthru,
 define <vscale x 4 x i32> @test_vunzipo_i32m2(<vscale x 4 x i32> %passthru, <vscale x 8 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.iXLen(
@@ -385,7 +385,7 @@ define <vscale x 4 x i32> @test_vunzipo_mask_i32m2(<vscale x 4 x i32> %passthru,
 define <vscale x 8 x i32> @test_vunzipo_i32m4(<vscale x 8 x i32> %passthru, <vscale x 16 x i32> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.iXLen(
@@ -412,7 +412,7 @@ define <vscale x 8 x i32> @test_vunzipo_mask_i32m4(<vscale x 8 x i32> %passthru,
 define <vscale x 1 x i64> @test_vunzipo_i64m1(<vscale x 1 x i64> %passthru, <vscale x 2 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.iXLen(
@@ -439,7 +439,7 @@ define <vscale x 1 x i64> @test_vunzipo_mask_i64m1(<vscale x 1 x i64> %passthru,
 define <vscale x 2 x i64> @test_vunzipo_i64m2(<vscale x 2 x i64> %passthru, <vscale x 4 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.iXLen(
@@ -466,7 +466,7 @@ define <vscale x 2 x i64> @test_vunzipo_mask_i64m2(<vscale x 2 x i64> %passthru,
 define <vscale x 4 x i64> @test_vunzipo_i64m4(<vscale x 4 x i64> %passthru, <vscale x 8 x i64> %arg1, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_i64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.iXLen(
@@ -496,7 +496,7 @@ define <vscale x 4 x i64> @test_vunzipo_mask_i64m4(<vscale x 4 x i64> %passthru,
 define <vscale x 1 x half> @test_vunzipo_f16mf4(<vscale x 1 x half> %passthru, <vscale x 2 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f16mf4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.iXLen(
@@ -523,7 +523,7 @@ define <vscale x 1 x half> @test_vunzipo_mask_f16mf4(<vscale x 1 x half> %passth
 define <vscale x 2 x half> @test_vunzipo_f16mf2(<vscale x 2 x half> %passthru, <vscale x 4 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f16mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.iXLen(
@@ -550,7 +550,7 @@ define <vscale x 2 x half> @test_vunzipo_mask_f16mf2(<vscale x 2 x half> %passth
 define <vscale x 4 x half> @test_vunzipo_f16m1(<vscale x 4 x half> %passthru, <vscale x 8 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f16m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.iXLen(
@@ -577,7 +577,7 @@ define <vscale x 4 x half> @test_vunzipo_mask_f16m1(<vscale x 4 x half> %passthr
 define <vscale x 8 x half> @test_vunzipo_f16m2(<vscale x 8 x half> %passthru, <vscale x 16 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f16m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.iXLen(
@@ -604,7 +604,7 @@ define <vscale x 8 x half> @test_vunzipo_mask_f16m2(<vscale x 8 x half> %passthr
 define <vscale x 16 x half> @test_vunzipo_f16m4(<vscale x 16 x half> %passthru, <vscale x 32 x half> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f16m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.iXLen(
@@ -632,7 +632,7 @@ define <vscale x 16 x half> @test_vunzipo_mask_f16m4(<vscale x 16 x half> %passt
 define <vscale x 1 x float> @test_vunzipo_f32mf2(<vscale x 1 x float> %passthru, <vscale x 2 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f32mf2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.iXLen(
@@ -659,7 +659,7 @@ define <vscale x 1 x float> @test_vunzipo_mask_f32mf2(<vscale x 1 x float> %pass
 define <vscale x 2 x float> @test_vunzipo_f32m1(<vscale x 2 x float> %passthru, <vscale x 4 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f32m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.iXLen(
@@ -686,7 +686,7 @@ define <vscale x 2 x float> @test_vunzipo_mask_f32m1(<vscale x 2 x float> %passt
 define <vscale x 4 x float> @test_vunzipo_f32m2(<vscale x 4 x float> %passthru, <vscale x 8 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f32m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.iXLen(
@@ -713,7 +713,7 @@ define <vscale x 4 x float> @test_vunzipo_mask_f32m2(<vscale x 4 x float> %passt
 define <vscale x 8 x float> @test_vunzipo_f32m4(<vscale x 8 x float> %passthru, <vscale x 16 x float> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f32m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.iXLen(
@@ -741,7 +741,7 @@ define <vscale x 8 x float> @test_vunzipo_mask_f32m4(<vscale x 8 x float> %passt
 define <vscale x 1 x double> @test_vunzipo_f64m1(<vscale x 1 x double> %passthru, <vscale x 2 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f64m1:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
   %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.iXLen(
@@ -768,7 +768,7 @@ define <vscale x 1 x double> @test_vunzipo_mask_f64m1(<vscale x 1 x double> %pas
 define <vscale x 2 x double> @test_vunzipo_f64m2(<vscale x 2 x double> %passthru, <vscale x 4 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f64m2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
   %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.iXLen(
@@ -795,7 +795,7 @@ define <vscale x 2 x double> @test_vunzipo_mask_f64m2(<vscale x 2 x double> %pas
 define <vscale x 4 x double> @test_vunzipo_f64m4(<vscale x 4 x double> %passthru, <vscale x 8 x double> %src, iXLen %vl) nounwind {
 ; CHECK-LABEL: test_vunzipo_f64m4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, ma
+; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
   %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.iXLen(

>From a4dbeaf317691385e3535aa925d32fbc1af13c9f Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 31 Mar 2026 11:42:46 +0800
Subject: [PATCH 4/5] Use LLVMOneNthElementsVectorType for unmasked intrinsics
 and refactor tests

---
 llvm/include/llvm/IR/IntrinsicsRISCV.td |  11 +-
 llvm/test/CodeGen/RISCV/rvv/vpaire.ll   | 148 ++++++++++++------------
 llvm/test/CodeGen/RISCV/rvv/vpairo.ll   | 148 ++++++++++++------------
 llvm/test/CodeGen/RISCV/rvv/vunzipe.ll  | 120 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vunzipo.ll  | 120 +++++++++----------
 llvm/test/CodeGen/RISCV/rvv/vzip.ll     | 120 +++++++++----------
 6 files changed, 335 insertions(+), 332 deletions(-)

diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
index caf8fa6f9be81..4c5d02f4c4c85 100644
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -1947,8 +1947,10 @@ let TargetPrefix = "riscv" in {
     // Input: (passthru, vector_in, vector_in, vl)
     def "int_riscv_" # NAME :
       DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                            [LLVMMatchType<0>, llvm_anyvector_ty,
-                             LLVMMatchType<1>, llvm_anyint_ty],
+                            [LLVMMatchType<0>,
+                             LLVMOneNthElementsVectorType<0, 2>,
+                             LLVMOneNthElementsVectorType<0, 2>,
+                             llvm_anyint_ty],
                             [IntrNoMem]>, RISCVVIntrinsic {
       let VLOperand = 3;
     }
@@ -1968,8 +1970,9 @@ let TargetPrefix = "riscv" in {
   multiclass RISCVUnzip {
     // Input: (passthru, vector_in, vl)
     def "int_riscv_" # NAME :
-      DefaultAttrsIntrinsic<[llvm_anyvector_ty],
-                            [LLVMMatchType<0>, llvm_anyvector_ty,
+      DefaultAttrsIntrinsic<[LLVMOneNthElementsVectorType<0, 2>],
+                            [LLVMOneNthElementsVectorType<0, 2>,
+                             llvm_anyvector_ty,
                              llvm_anyint_ty],
                             [IntrNoMem]>, RISCVVIntrinsic {
       let VLOperand = 2;
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpaire.ll b/llvm/test/CodeGen/RISCV/rvv/vpaire.ll
index 27c5d361a3650..67a49e358baf3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpaire.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpaire.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @test_vpaire_i8mf8(<vscale x 1 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire.nxv1i8.iXLen(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire(
     <vscale x 1 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -24,7 +24,7 @@ define <vscale x 1 x i8> @test_vpaire_mask_i8mf8(<vscale x 1 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 1 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -39,7 +39,7 @@ define <vscale x 2 x i8> @test_vpaire_i8mf4(<vscale x 2 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire.nxv2i8.iXLen(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire(
     <vscale x 2 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -53,7 +53,7 @@ define <vscale x 2 x i8> @test_vpaire_mask_i8mf4(<vscale x 2 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 2 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -68,7 +68,7 @@ define <vscale x 4 x i8> @test_vpaire_i8mf2(<vscale x 4 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire.nxv4i8.iXLen(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire(
     <vscale x 4 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -82,7 +82,7 @@ define <vscale x 4 x i8> @test_vpaire_mask_i8mf2(<vscale x 4 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 4 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -97,7 +97,7 @@ define <vscale x 8 x i8> @test_vpaire_i8m1(<vscale x 8 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire.nxv8i8.iXLen(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire(
     <vscale x 8 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -111,7 +111,7 @@ define <vscale x 8 x i8> @test_vpaire_mask_i8m1(<vscale x 8 x i8> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 8 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -126,7 +126,7 @@ define <vscale x 16 x i8> @test_vpaire_i8m2(<vscale x 16 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire.nxv16i8.iXLen(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire(
     <vscale x 16 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -140,7 +140,7 @@ define <vscale x 16 x i8> @test_vpaire_mask_i8m2(<vscale x 16 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 16 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -155,7 +155,7 @@ define <vscale x 32 x i8> @test_vpaire_i8m4(<vscale x 32 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire.nxv32i8.iXLen(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire(
     <vscale x 32 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -169,7 +169,7 @@ define <vscale x 32 x i8> @test_vpaire_mask_i8m4(<vscale x 32 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 32 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -185,7 +185,7 @@ define <vscale x 64 x i8> @test_vpaire_i8m8(<vscale x 64 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire.nxv64i8.iXLen(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire(
     <vscale x 64 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 64 x i8> %arg2,
@@ -200,7 +200,7 @@ define <vscale x 64 x i8> @test_vpaire_mask_i8m8(<vscale x 64 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpaire.mask(
     <vscale x 64 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 64 x i8> %arg2,
@@ -215,7 +215,7 @@ define <vscale x 1 x i16> @test_vpaire_i16mf4(<vscale x 1 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire.nxv1i16.iXLen(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire(
     <vscale x 1 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -229,7 +229,7 @@ define <vscale x 1 x i16> @test_vpaire_mask_i16mf4(<vscale x 1 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 1 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -244,7 +244,7 @@ define <vscale x 2 x i16> @test_vpaire_i16mf2(<vscale x 2 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire.nxv2i16.iXLen(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire(
     <vscale x 2 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -258,7 +258,7 @@ define <vscale x 2 x i16> @test_vpaire_mask_i16mf2(<vscale x 2 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 2 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -273,7 +273,7 @@ define <vscale x 4 x i16> @test_vpaire_i16m1(<vscale x 4 x i16> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire.nxv4i16.iXLen(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire(
     <vscale x 4 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -287,7 +287,7 @@ define <vscale x 4 x i16> @test_vpaire_mask_i16m1(<vscale x 4 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 4 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -302,7 +302,7 @@ define <vscale x 8 x i16> @test_vpaire_i16m2(<vscale x 8 x i16> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire.nxv8i16.iXLen(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire(
     <vscale x 8 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -316,7 +316,7 @@ define <vscale x 8 x i16> @test_vpaire_mask_i16m2(<vscale x 8 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 8 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -331,7 +331,7 @@ define <vscale x 16 x i16> @test_vpaire_i16m4(<vscale x 16 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire.nxv16i16.iXLen(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire(
     <vscale x 16 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -345,7 +345,7 @@ define <vscale x 16 x i16> @test_vpaire_mask_i16m4(<vscale x 16 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 16 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -361,7 +361,7 @@ define <vscale x 32 x i16> @test_vpaire_i16m8(<vscale x 32 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire.nxv32i16.iXLen(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire(
     <vscale x 32 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 32 x i16> %arg2,
@@ -376,7 +376,7 @@ define <vscale x 32 x i16> @test_vpaire_mask_i16m8(<vscale x 32 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpaire.mask(
     <vscale x 32 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 32 x i16> %arg2,
@@ -391,7 +391,7 @@ define <vscale x 1 x i32> @test_vpaire_i32mf2(<vscale x 1 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire.nxv1i32.iXLen(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire(
     <vscale x 1 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -405,7 +405,7 @@ define <vscale x 1 x i32> @test_vpaire_mask_i32mf2(<vscale x 1 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpaire.mask(
     <vscale x 1 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -420,7 +420,7 @@ define <vscale x 2 x i32> @test_vpaire_i32m1(<vscale x 2 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire.nxv2i32.iXLen(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire(
     <vscale x 2 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -434,7 +434,7 @@ define <vscale x 2 x i32> @test_vpaire_mask_i32m1(<vscale x 2 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpaire.mask(
     <vscale x 2 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -449,7 +449,7 @@ define <vscale x 4 x i32> @test_vpaire_i32m2(<vscale x 4 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire.nxv4i32.iXLen(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire(
     <vscale x 4 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -463,7 +463,7 @@ define <vscale x 4 x i32> @test_vpaire_mask_i32m2(<vscale x 4 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpaire.mask(
     <vscale x 4 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -478,7 +478,7 @@ define <vscale x 8 x i32> @test_vpaire_i32m4(<vscale x 8 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire.nxv8i32.iXLen(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire(
     <vscale x 8 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -492,7 +492,7 @@ define <vscale x 8 x i32> @test_vpaire_mask_i32m4(<vscale x 8 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpaire.mask(
     <vscale x 8 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -508,7 +508,7 @@ define <vscale x 16 x i32> @test_vpaire_i32m8(<vscale x 16 x i32> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire.nxv16i32.iXLen(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire(
     <vscale x 16 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 16 x i32> %arg2,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @test_vpaire_mask_i32m8(<vscale x 16 x i32> %passthru
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpaire.mask(
     <vscale x 16 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 16 x i32> %arg2,
@@ -538,7 +538,7 @@ define <vscale x 1 x i64> @test_vpaire_i64m1(<vscale x 1 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire.nxv1i64.iXLen(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire(
     <vscale x 1 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -552,7 +552,7 @@ define <vscale x 1 x i64> @test_vpaire_mask_i64m1(<vscale x 1 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask.nxv1i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpaire.mask(
     <vscale x 1 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -567,7 +567,7 @@ define <vscale x 2 x i64> @test_vpaire_i64m2(<vscale x 2 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire.nxv2i64.iXLen(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire(
     <vscale x 2 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -581,7 +581,7 @@ define <vscale x 2 x i64> @test_vpaire_mask_i64m2(<vscale x 2 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask.nxv2i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpaire.mask(
     <vscale x 2 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -596,7 +596,7 @@ define <vscale x 4 x i64> @test_vpaire_i64m4(<vscale x 4 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire.nxv4i64.iXLen(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire(
     <vscale x 4 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64> @test_vpaire_mask_i64m4(<vscale x 4 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask.nxv4i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpaire.mask(
     <vscale x 4 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -626,7 +626,7 @@ define <vscale x 8 x i64> @test_vpaire_i64m8(<vscale x 8 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire.nxv8i64.iXLen(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire(
     <vscale x 8 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 8 x i64> %arg2,
@@ -641,7 +641,7 @@ define <vscale x 8 x i64> @test_vpaire_mask_i64m8(<vscale x 8 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask.nxv8i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpaire.mask(
     <vscale x 8 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 8 x i64> %arg2,
@@ -659,7 +659,7 @@ define <vscale x 1 x half> @test_vpaire_f16mf4(<vscale x 1 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vpaire.nxv1f16.iXLen(
+  %r = call <vscale x 1 x half> @llvm.riscv.vpaire(
     <vscale x 1 x half> %passthru,
     <vscale x 1 x half> %a1,
     <vscale x 1 x half> %a2,
@@ -673,7 +673,7 @@ define <vscale x 1 x half> @test_vpaire_mask_f16mf4(<vscale x 1 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vpaire.mask.nxv1f16(
+  %r = call <vscale x 1 x half> @llvm.riscv.vpaire.mask(
     <vscale x 1 x half> %passthru,
     <vscale x 1 x half> %a1,
     <vscale x 1 x half> %a2,
@@ -689,7 +689,7 @@ define <vscale x 2 x half> @test_vpaire_f16mf2(<vscale x 2 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vpaire.nxv2f16.iXLen(
+  %r = call <vscale x 2 x half> @llvm.riscv.vpaire(
     <vscale x 2 x half> %passthru,
     <vscale x 2 x half> %a1,
     <vscale x 2 x half> %a2,
@@ -703,7 +703,7 @@ define <vscale x 2 x half> @test_vpaire_mask_f16mf2(<vscale x 2 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vpaire.mask.nxv2f16(
+  %r = call <vscale x 2 x half> @llvm.riscv.vpaire.mask(
     <vscale x 2 x half> %passthru,
     <vscale x 2 x half> %a1,
     <vscale x 2 x half> %a2,
@@ -719,7 +719,7 @@ define <vscale x 4 x half> @test_vpaire_f16m1(<vscale x 4 x half> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vpaire.nxv4f16.iXLen(
+  %r = call <vscale x 4 x half> @llvm.riscv.vpaire(
     <vscale x 4 x half> %passthru,
     <vscale x 4 x half> %a1,
     <vscale x 4 x half> %a2,
@@ -733,7 +733,7 @@ define <vscale x 4 x half> @test_vpaire_mask_f16m1(<vscale x 4 x half> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vpaire.mask.nxv4f16(
+  %r = call <vscale x 4 x half> @llvm.riscv.vpaire.mask(
     <vscale x 4 x half> %passthru,
     <vscale x 4 x half> %a1,
     <vscale x 4 x half> %a2,
@@ -749,7 +749,7 @@ define <vscale x 8 x half> @test_vpaire_f16m2(<vscale x 8 x half> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vpaire.nxv8f16.iXLen(
+  %r = call <vscale x 8 x half> @llvm.riscv.vpaire(
     <vscale x 8 x half> %passthru,
     <vscale x 8 x half> %a1,
     <vscale x 8 x half> %a2,
@@ -763,7 +763,7 @@ define <vscale x 8 x half> @test_vpaire_mask_f16m2(<vscale x 8 x half> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vpaire.mask.nxv8f16(
+  %r = call <vscale x 8 x half> @llvm.riscv.vpaire.mask(
     <vscale x 8 x half> %passthru,
     <vscale x 8 x half> %a1,
     <vscale x 8 x half> %a2,
@@ -779,7 +779,7 @@ define <vscale x 16 x half> @test_vpaire_f16m4(<vscale x 16 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vpaire.nxv16f16.iXLen(
+  %r = call <vscale x 16 x half> @llvm.riscv.vpaire(
     <vscale x 16 x half> %passthru,
     <vscale x 16 x half> %a1,
     <vscale x 16 x half> %a2,
@@ -793,7 +793,7 @@ define <vscale x 16 x half> @test_vpaire_mask_f16m4(<vscale x 16 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vpaire.mask.nxv16f16(
+  %r = call <vscale x 16 x half> @llvm.riscv.vpaire.mask(
     <vscale x 16 x half> %passthru,
     <vscale x 16 x half> %a1,
     <vscale x 16 x half> %a2,
@@ -810,7 +810,7 @@ define <vscale x 32 x half> @test_vpaire_f16m8(<vscale x 32 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vpaire.nxv32f16.iXLen(
+  %r = call <vscale x 32 x half> @llvm.riscv.vpaire(
     <vscale x 32 x half> %passthru,
     <vscale x 32 x half> %a1,
     <vscale x 32 x half> %a2,
@@ -825,7 +825,7 @@ define <vscale x 32 x half> @test_vpaire_mask_f16m8(<vscale x 32 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vpaire.mask.nxv32f16(
+  %r = call <vscale x 32 x half> @llvm.riscv.vpaire.mask(
     <vscale x 32 x half> %passthru,
     <vscale x 32 x half> %a1,
     <vscale x 32 x half> %a2,
@@ -842,7 +842,7 @@ define <vscale x 1 x float> @test_vpaire_f32mf2(<vscale x 1 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vpaire.nxv1f32.iXLen(
+  %r = call <vscale x 1 x float> @llvm.riscv.vpaire(
     <vscale x 1 x float> %passthru,
     <vscale x 1 x float> %a1,
     <vscale x 1 x float> %a2,
@@ -856,7 +856,7 @@ define <vscale x 1 x float> @test_vpaire_mask_f32mf2(<vscale x 1 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vpaire.mask.nxv1f32(
+  %r = call <vscale x 1 x float> @llvm.riscv.vpaire.mask(
     <vscale x 1 x float> %passthru,
     <vscale x 1 x float> %a1,
     <vscale x 1 x float> %a2,
@@ -872,7 +872,7 @@ define <vscale x 2 x float> @test_vpaire_f32m1(<vscale x 2 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vpaire.nxv2f32.iXLen(
+  %r = call <vscale x 2 x float> @llvm.riscv.vpaire(
     <vscale x 2 x float> %passthru,
     <vscale x 2 x float> %a1,
     <vscale x 2 x float> %a2,
@@ -886,7 +886,7 @@ define <vscale x 2 x float> @test_vpaire_mask_f32m1(<vscale x 2 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vpaire.mask.nxv2f32(
+  %r = call <vscale x 2 x float> @llvm.riscv.vpaire.mask(
     <vscale x 2 x float> %passthru,
     <vscale x 2 x float> %a1,
     <vscale x 2 x float> %a2,
@@ -902,7 +902,7 @@ define <vscale x 4 x float> @test_vpaire_f32m2(<vscale x 4 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vpaire.nxv4f32.iXLen(
+  %r = call <vscale x 4 x float> @llvm.riscv.vpaire(
     <vscale x 4 x float> %passthru,
     <vscale x 4 x float> %a1,
     <vscale x 4 x float> %a2,
@@ -916,7 +916,7 @@ define <vscale x 4 x float> @test_vpaire_mask_f32m2(<vscale x 4 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vpaire.mask.nxv4f32(
+  %r = call <vscale x 4 x float> @llvm.riscv.vpaire.mask(
     <vscale x 4 x float> %passthru,
     <vscale x 4 x float> %a1,
     <vscale x 4 x float> %a2,
@@ -932,7 +932,7 @@ define <vscale x 8 x float> @test_vpaire_f32m4(<vscale x 8 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vpaire.nxv8f32.iXLen(
+  %r = call <vscale x 8 x float> @llvm.riscv.vpaire(
     <vscale x 8 x float> %passthru,
     <vscale x 8 x float> %a1,
     <vscale x 8 x float> %a2,
@@ -946,7 +946,7 @@ define <vscale x 8 x float> @test_vpaire_mask_f32m4(<vscale x 8 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vpaire.mask.nxv8f32(
+  %r = call <vscale x 8 x float> @llvm.riscv.vpaire.mask(
     <vscale x 8 x float> %passthru,
     <vscale x 8 x float> %a1,
     <vscale x 8 x float> %a2,
@@ -963,7 +963,7 @@ define <vscale x 16 x float> @test_vpaire_f32m8(<vscale x 16 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vpaire.nxv16f32.iXLen(
+  %r = call <vscale x 16 x float> @llvm.riscv.vpaire(
     <vscale x 16 x float> %passthru,
     <vscale x 16 x float> %a1,
     <vscale x 16 x float> %a2,
@@ -978,7 +978,7 @@ define <vscale x 16 x float> @test_vpaire_mask_f32m8(<vscale x 16 x float> %pass
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vpaire.mask.nxv16f32(
+  %r = call <vscale x 16 x float> @llvm.riscv.vpaire.mask(
     <vscale x 16 x float> %passthru,
     <vscale x 16 x float> %a1,
     <vscale x 16 x float> %a2,
@@ -995,7 +995,7 @@ define <vscale x 1 x double> @test_vpaire_f64m1(<vscale x 1 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vpaire.nxv1f64.iXLen(
+  %r = call <vscale x 1 x double> @llvm.riscv.vpaire(
     <vscale x 1 x double> %passthru,
     <vscale x 1 x double> %a1,
     <vscale x 1 x double> %a2,
@@ -1009,7 +1009,7 @@ define <vscale x 1 x double> @test_vpaire_mask_f64m1(<vscale x 1 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vpaire.mask.nxv1f64(
+  %r = call <vscale x 1 x double> @llvm.riscv.vpaire.mask(
     <vscale x 1 x double> %passthru,
     <vscale x 1 x double> %a1,
     <vscale x 1 x double> %a2,
@@ -1025,7 +1025,7 @@ define <vscale x 2 x double> @test_vpaire_f64m2(<vscale x 2 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vpaire.nxv2f64.iXLen(
+  %r = call <vscale x 2 x double> @llvm.riscv.vpaire(
     <vscale x 2 x double> %passthru,
     <vscale x 2 x double> %a1,
     <vscale x 2 x double> %a2,
@@ -1039,7 +1039,7 @@ define <vscale x 2 x double> @test_vpaire_mask_f64m2(<vscale x 2 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vpaire.mask.nxv2f64(
+  %r = call <vscale x 2 x double> @llvm.riscv.vpaire.mask(
     <vscale x 2 x double> %passthru,
     <vscale x 2 x double> %a1,
     <vscale x 2 x double> %a2,
@@ -1055,7 +1055,7 @@ define <vscale x 4 x double> @test_vpaire_f64m4(<vscale x 4 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vpaire.nxv4f64.iXLen(
+  %r = call <vscale x 4 x double> @llvm.riscv.vpaire(
     <vscale x 4 x double> %passthru,
     <vscale x 4 x double> %a1,
     <vscale x 4 x double> %a2,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x double> @test_vpaire_mask_f64m4(<vscale x 4 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vpaire.mask.nxv4f64(
+  %r = call <vscale x 4 x double> @llvm.riscv.vpaire.mask(
     <vscale x 4 x double> %passthru,
     <vscale x 4 x double> %a1,
     <vscale x 4 x double> %a2,
@@ -1086,7 +1086,7 @@ define <vscale x 8 x double> @test_vpaire_f64m8(<vscale x 8 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vpaire.nxv8f64.iXLen(
+  %r = call <vscale x 8 x double> @llvm.riscv.vpaire(
     <vscale x 8 x double> %passthru,
     <vscale x 8 x double> %a1,
     <vscale x 8 x double> %a2,
@@ -1101,7 +1101,7 @@ define <vscale x 8 x double> @test_vpaire_mask_f64m8(<vscale x 8 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vpaire.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vpaire.mask.nxv8f64(
+  %r = call <vscale x 8 x double> @llvm.riscv.vpaire.mask(
     <vscale x 8 x double> %passthru,
     <vscale x 8 x double> %a1,
     <vscale x 8 x double> %a2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vpairo.ll b/llvm/test/CodeGen/RISCV/rvv/vpairo.ll
index f814e3d1207f4..650299be61896 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpairo.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpairo.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @test_vpairo_i8mf8(<vscale x 1 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo.nxv1i8.iXLen(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo(
     <vscale x 1 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -24,7 +24,7 @@ define <vscale x 1 x i8> @test_vpairo_mask_i8mf8(<vscale x 1 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask.nxv1i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 1 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -39,7 +39,7 @@ define <vscale x 2 x i8> @test_vpairo_i8mf4(<vscale x 2 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo.nxv2i8.iXLen(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo(
     <vscale x 2 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -53,7 +53,7 @@ define <vscale x 2 x i8> @test_vpairo_mask_i8mf4(<vscale x 2 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask.nxv2i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 2 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -68,7 +68,7 @@ define <vscale x 4 x i8> @test_vpairo_i8mf2(<vscale x 4 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo.nxv4i8.iXLen(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo(
     <vscale x 4 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -82,7 +82,7 @@ define <vscale x 4 x i8> @test_vpairo_mask_i8mf2(<vscale x 4 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask.nxv4i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 4 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -97,7 +97,7 @@ define <vscale x 8 x i8> @test_vpairo_i8m1(<vscale x 8 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo.nxv8i8.iXLen(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo(
     <vscale x 8 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -111,7 +111,7 @@ define <vscale x 8 x i8> @test_vpairo_mask_i8m1(<vscale x 8 x i8> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask.nxv8i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 8 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -126,7 +126,7 @@ define <vscale x 16 x i8> @test_vpairo_i8m2(<vscale x 16 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo.nxv16i8.iXLen(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo(
     <vscale x 16 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -140,7 +140,7 @@ define <vscale x 16 x i8> @test_vpairo_mask_i8m2(<vscale x 16 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask.nxv16i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 16 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -155,7 +155,7 @@ define <vscale x 32 x i8> @test_vpairo_i8m4(<vscale x 32 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo.nxv32i8.iXLen(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo(
     <vscale x 32 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -169,7 +169,7 @@ define <vscale x 32 x i8> @test_vpairo_mask_i8m4(<vscale x 32 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask.nxv32i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 32 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -185,7 +185,7 @@ define <vscale x 64 x i8> @test_vpairo_i8m8(<vscale x 64 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo.nxv64i8.iXLen(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo(
     <vscale x 64 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 64 x i8> %arg2,
@@ -200,7 +200,7 @@ define <vscale x 64 x i8> @test_vpairo_mask_i8m8(<vscale x 64 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask.nxv64i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vpairo.mask(
     <vscale x 64 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 64 x i8> %arg2,
@@ -215,7 +215,7 @@ define <vscale x 1 x i16> @test_vpairo_i16mf4(<vscale x 1 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo.nxv1i16.iXLen(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo(
     <vscale x 1 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -229,7 +229,7 @@ define <vscale x 1 x i16> @test_vpairo_mask_i16mf4(<vscale x 1 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask.nxv1i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 1 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -244,7 +244,7 @@ define <vscale x 2 x i16> @test_vpairo_i16mf2(<vscale x 2 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo.nxv2i16.iXLen(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo(
     <vscale x 2 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -258,7 +258,7 @@ define <vscale x 2 x i16> @test_vpairo_mask_i16mf2(<vscale x 2 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask.nxv2i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 2 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -273,7 +273,7 @@ define <vscale x 4 x i16> @test_vpairo_i16m1(<vscale x 4 x i16> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo.nxv4i16.iXLen(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo(
     <vscale x 4 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -287,7 +287,7 @@ define <vscale x 4 x i16> @test_vpairo_mask_i16m1(<vscale x 4 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask.nxv4i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 4 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -302,7 +302,7 @@ define <vscale x 8 x i16> @test_vpairo_i16m2(<vscale x 8 x i16> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo.nxv8i16.iXLen(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo(
     <vscale x 8 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -316,7 +316,7 @@ define <vscale x 8 x i16> @test_vpairo_mask_i16m2(<vscale x 8 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask.nxv8i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 8 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -331,7 +331,7 @@ define <vscale x 16 x i16> @test_vpairo_i16m4(<vscale x 16 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo.nxv16i16.iXLen(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo(
     <vscale x 16 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -345,7 +345,7 @@ define <vscale x 16 x i16> @test_vpairo_mask_i16m4(<vscale x 16 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask.nxv16i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 16 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -361,7 +361,7 @@ define <vscale x 32 x i16> @test_vpairo_i16m8(<vscale x 32 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo.nxv32i16.iXLen(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo(
     <vscale x 32 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 32 x i16> %arg2,
@@ -376,7 +376,7 @@ define <vscale x 32 x i16> @test_vpairo_mask_i16m8(<vscale x 32 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask.nxv32i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vpairo.mask(
     <vscale x 32 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 32 x i16> %arg2,
@@ -391,7 +391,7 @@ define <vscale x 1 x i32> @test_vpairo_i32mf2(<vscale x 1 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo.nxv1i32.iXLen(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo(
     <vscale x 1 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -405,7 +405,7 @@ define <vscale x 1 x i32> @test_vpairo_mask_i32mf2(<vscale x 1 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask.nxv1i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vpairo.mask(
     <vscale x 1 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -420,7 +420,7 @@ define <vscale x 2 x i32> @test_vpairo_i32m1(<vscale x 2 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo.nxv2i32.iXLen(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo(
     <vscale x 2 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -434,7 +434,7 @@ define <vscale x 2 x i32> @test_vpairo_mask_i32m1(<vscale x 2 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask.nxv2i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vpairo.mask(
     <vscale x 2 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -449,7 +449,7 @@ define <vscale x 4 x i32> @test_vpairo_i32m2(<vscale x 4 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo.nxv4i32.iXLen(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo(
     <vscale x 4 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -463,7 +463,7 @@ define <vscale x 4 x i32> @test_vpairo_mask_i32m2(<vscale x 4 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask.nxv4i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vpairo.mask(
     <vscale x 4 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -478,7 +478,7 @@ define <vscale x 8 x i32> @test_vpairo_i32m4(<vscale x 8 x i32> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo.nxv8i32.iXLen(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo(
     <vscale x 8 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -492,7 +492,7 @@ define <vscale x 8 x i32> @test_vpairo_mask_i32m4(<vscale x 8 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask.nxv8i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vpairo.mask(
     <vscale x 8 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -508,7 +508,7 @@ define <vscale x 16 x i32> @test_vpairo_i32m8(<vscale x 16 x i32> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo.nxv16i32.iXLen(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo(
     <vscale x 16 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 16 x i32> %arg2,
@@ -523,7 +523,7 @@ define <vscale x 16 x i32> @test_vpairo_mask_i32m8(<vscale x 16 x i32> %passthru
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask.nxv16i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vpairo.mask(
     <vscale x 16 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 16 x i32> %arg2,
@@ -538,7 +538,7 @@ define <vscale x 1 x i64> @test_vpairo_i64m1(<vscale x 1 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo.nxv1i64.iXLen(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo(
     <vscale x 1 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -552,7 +552,7 @@ define <vscale x 1 x i64> @test_vpairo_mask_i64m1(<vscale x 1 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask.nxv1i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vpairo.mask(
     <vscale x 1 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -567,7 +567,7 @@ define <vscale x 2 x i64> @test_vpairo_i64m2(<vscale x 2 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo.nxv2i64.iXLen(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo(
     <vscale x 2 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -581,7 +581,7 @@ define <vscale x 2 x i64> @test_vpairo_mask_i64m2(<vscale x 2 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask.nxv2i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vpairo.mask(
     <vscale x 2 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -596,7 +596,7 @@ define <vscale x 4 x i64> @test_vpairo_i64m4(<vscale x 4 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo.nxv4i64.iXLen(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo(
     <vscale x 4 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -610,7 +610,7 @@ define <vscale x 4 x i64> @test_vpairo_mask_i64m4(<vscale x 4 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask.nxv4i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vpairo.mask(
     <vscale x 4 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -626,7 +626,7 @@ define <vscale x 8 x i64> @test_vpairo_i64m8(<vscale x 8 x i64> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo.nxv8i64.iXLen(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo(
     <vscale x 8 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 8 x i64> %arg2,
@@ -641,7 +641,7 @@ define <vscale x 8 x i64> @test_vpairo_mask_i64m8(<vscale x 8 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask.nxv8i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vpairo.mask(
     <vscale x 8 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 8 x i64> %arg2,
@@ -659,7 +659,7 @@ define <vscale x 1 x half> @test_vpairo_f16mf4(<vscale x 1 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vpairo.nxv1f16.iXLen(
+  %r = call <vscale x 1 x half> @llvm.riscv.vpairo(
     <vscale x 1 x half> %passthru,
     <vscale x 1 x half> %a1,
     <vscale x 1 x half> %a2,
@@ -673,7 +673,7 @@ define <vscale x 1 x half> @test_vpairo_mask_f16mf4(<vscale x 1 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vpairo.mask.nxv1f16(
+  %r = call <vscale x 1 x half> @llvm.riscv.vpairo.mask(
     <vscale x 1 x half> %passthru,
     <vscale x 1 x half> %a1,
     <vscale x 1 x half> %a2,
@@ -689,7 +689,7 @@ define <vscale x 2 x half> @test_vpairo_f16mf2(<vscale x 2 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vpairo.nxv2f16.iXLen(
+  %r = call <vscale x 2 x half> @llvm.riscv.vpairo(
     <vscale x 2 x half> %passthru,
     <vscale x 2 x half> %a1,
     <vscale x 2 x half> %a2,
@@ -703,7 +703,7 @@ define <vscale x 2 x half> @test_vpairo_mask_f16mf2(<vscale x 2 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vpairo.mask.nxv2f16(
+  %r = call <vscale x 2 x half> @llvm.riscv.vpairo.mask(
     <vscale x 2 x half> %passthru,
     <vscale x 2 x half> %a1,
     <vscale x 2 x half> %a2,
@@ -719,7 +719,7 @@ define <vscale x 4 x half> @test_vpairo_f16m1(<vscale x 4 x half> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vpairo.nxv4f16.iXLen(
+  %r = call <vscale x 4 x half> @llvm.riscv.vpairo(
     <vscale x 4 x half> %passthru,
     <vscale x 4 x half> %a1,
     <vscale x 4 x half> %a2,
@@ -733,7 +733,7 @@ define <vscale x 4 x half> @test_vpairo_mask_f16m1(<vscale x 4 x half> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vpairo.mask.nxv4f16(
+  %r = call <vscale x 4 x half> @llvm.riscv.vpairo.mask(
     <vscale x 4 x half> %passthru,
     <vscale x 4 x half> %a1,
     <vscale x 4 x half> %a2,
@@ -749,7 +749,7 @@ define <vscale x 8 x half> @test_vpairo_f16m2(<vscale x 8 x half> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vpairo.nxv8f16.iXLen(
+  %r = call <vscale x 8 x half> @llvm.riscv.vpairo(
     <vscale x 8 x half> %passthru,
     <vscale x 8 x half> %a1,
     <vscale x 8 x half> %a2,
@@ -763,7 +763,7 @@ define <vscale x 8 x half> @test_vpairo_mask_f16m2(<vscale x 8 x half> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vpairo.mask.nxv8f16(
+  %r = call <vscale x 8 x half> @llvm.riscv.vpairo.mask(
     <vscale x 8 x half> %passthru,
     <vscale x 8 x half> %a1,
     <vscale x 8 x half> %a2,
@@ -779,7 +779,7 @@ define <vscale x 16 x half> @test_vpairo_f16m4(<vscale x 16 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vpairo.nxv16f16.iXLen(
+  %r = call <vscale x 16 x half> @llvm.riscv.vpairo(
     <vscale x 16 x half> %passthru,
     <vscale x 16 x half> %a1,
     <vscale x 16 x half> %a2,
@@ -793,7 +793,7 @@ define <vscale x 16 x half> @test_vpairo_mask_f16m4(<vscale x 16 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vpairo.mask.nxv16f16(
+  %r = call <vscale x 16 x half> @llvm.riscv.vpairo.mask(
     <vscale x 16 x half> %passthru,
     <vscale x 16 x half> %a1,
     <vscale x 16 x half> %a2,
@@ -810,7 +810,7 @@ define <vscale x 32 x half> @test_vpairo_f16m8(<vscale x 32 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vpairo.nxv32f16.iXLen(
+  %r = call <vscale x 32 x half> @llvm.riscv.vpairo(
     <vscale x 32 x half> %passthru,
     <vscale x 32 x half> %a1,
     <vscale x 32 x half> %a2,
@@ -825,7 +825,7 @@ define <vscale x 32 x half> @test_vpairo_mask_f16m8(<vscale x 32 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vpairo.mask.nxv32f16(
+  %r = call <vscale x 32 x half> @llvm.riscv.vpairo.mask(
     <vscale x 32 x half> %passthru,
     <vscale x 32 x half> %a1,
     <vscale x 32 x half> %a2,
@@ -842,7 +842,7 @@ define <vscale x 1 x float> @test_vpairo_f32mf2(<vscale x 1 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vpairo.nxv1f32.iXLen(
+  %r = call <vscale x 1 x float> @llvm.riscv.vpairo(
     <vscale x 1 x float> %passthru,
     <vscale x 1 x float> %a1,
     <vscale x 1 x float> %a2,
@@ -856,7 +856,7 @@ define <vscale x 1 x float> @test_vpairo_mask_f32mf2(<vscale x 1 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vpairo.mask.nxv1f32(
+  %r = call <vscale x 1 x float> @llvm.riscv.vpairo.mask(
     <vscale x 1 x float> %passthru,
     <vscale x 1 x float> %a1,
     <vscale x 1 x float> %a2,
@@ -872,7 +872,7 @@ define <vscale x 2 x float> @test_vpairo_f32m1(<vscale x 2 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vpairo.nxv2f32.iXLen(
+  %r = call <vscale x 2 x float> @llvm.riscv.vpairo(
     <vscale x 2 x float> %passthru,
     <vscale x 2 x float> %a1,
     <vscale x 2 x float> %a2,
@@ -886,7 +886,7 @@ define <vscale x 2 x float> @test_vpairo_mask_f32m1(<vscale x 2 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vpairo.mask.nxv2f32(
+  %r = call <vscale x 2 x float> @llvm.riscv.vpairo.mask(
     <vscale x 2 x float> %passthru,
     <vscale x 2 x float> %a1,
     <vscale x 2 x float> %a2,
@@ -902,7 +902,7 @@ define <vscale x 4 x float> @test_vpairo_f32m2(<vscale x 4 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vpairo.nxv4f32.iXLen(
+  %r = call <vscale x 4 x float> @llvm.riscv.vpairo(
     <vscale x 4 x float> %passthru,
     <vscale x 4 x float> %a1,
     <vscale x 4 x float> %a2,
@@ -916,7 +916,7 @@ define <vscale x 4 x float> @test_vpairo_mask_f32m2(<vscale x 4 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vpairo.mask.nxv4f32(
+  %r = call <vscale x 4 x float> @llvm.riscv.vpairo.mask(
     <vscale x 4 x float> %passthru,
     <vscale x 4 x float> %a1,
     <vscale x 4 x float> %a2,
@@ -932,7 +932,7 @@ define <vscale x 8 x float> @test_vpairo_f32m4(<vscale x 8 x float> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vpairo.nxv8f32.iXLen(
+  %r = call <vscale x 8 x float> @llvm.riscv.vpairo(
     <vscale x 8 x float> %passthru,
     <vscale x 8 x float> %a1,
     <vscale x 8 x float> %a2,
@@ -946,7 +946,7 @@ define <vscale x 8 x float> @test_vpairo_mask_f32m4(<vscale x 8 x float> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vpairo.mask.nxv8f32(
+  %r = call <vscale x 8 x float> @llvm.riscv.vpairo.mask(
     <vscale x 8 x float> %passthru,
     <vscale x 8 x float> %a1,
     <vscale x 8 x float> %a2,
@@ -963,7 +963,7 @@ define <vscale x 16 x float> @test_vpairo_f32m8(<vscale x 16 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vpairo.nxv16f32.iXLen(
+  %r = call <vscale x 16 x float> @llvm.riscv.vpairo(
     <vscale x 16 x float> %passthru,
     <vscale x 16 x float> %a1,
     <vscale x 16 x float> %a2,
@@ -978,7 +978,7 @@ define <vscale x 16 x float> @test_vpairo_mask_f32m8(<vscale x 16 x float> %pass
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vpairo.mask.nxv16f32(
+  %r = call <vscale x 16 x float> @llvm.riscv.vpairo.mask(
     <vscale x 16 x float> %passthru,
     <vscale x 16 x float> %a1,
     <vscale x 16 x float> %a2,
@@ -995,7 +995,7 @@ define <vscale x 1 x double> @test_vpairo_f64m1(<vscale x 1 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vpairo.nxv1f64.iXLen(
+  %r = call <vscale x 1 x double> @llvm.riscv.vpairo(
     <vscale x 1 x double> %passthru,
     <vscale x 1 x double> %a1,
     <vscale x 1 x double> %a2,
@@ -1009,7 +1009,7 @@ define <vscale x 1 x double> @test_vpairo_mask_f64m1(<vscale x 1 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vpairo.mask.nxv1f64(
+  %r = call <vscale x 1 x double> @llvm.riscv.vpairo.mask(
     <vscale x 1 x double> %passthru,
     <vscale x 1 x double> %a1,
     <vscale x 1 x double> %a2,
@@ -1025,7 +1025,7 @@ define <vscale x 2 x double> @test_vpairo_f64m2(<vscale x 2 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vpairo.nxv2f64.iXLen(
+  %r = call <vscale x 2 x double> @llvm.riscv.vpairo(
     <vscale x 2 x double> %passthru,
     <vscale x 2 x double> %a1,
     <vscale x 2 x double> %a2,
@@ -1039,7 +1039,7 @@ define <vscale x 2 x double> @test_vpairo_mask_f64m2(<vscale x 2 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v10, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vpairo.mask.nxv2f64(
+  %r = call <vscale x 2 x double> @llvm.riscv.vpairo.mask(
     <vscale x 2 x double> %passthru,
     <vscale x 2 x double> %a1,
     <vscale x 2 x double> %a2,
@@ -1055,7 +1055,7 @@ define <vscale x 4 x double> @test_vpairo_f64m4(<vscale x 4 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vpairo.nxv4f64.iXLen(
+  %r = call <vscale x 4 x double> @llvm.riscv.vpairo(
     <vscale x 4 x double> %passthru,
     <vscale x 4 x double> %a1,
     <vscale x 4 x double> %a2,
@@ -1069,7 +1069,7 @@ define <vscale x 4 x double> @test_vpairo_mask_f64m4(<vscale x 4 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v12, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vpairo.mask.nxv4f64(
+  %r = call <vscale x 4 x double> @llvm.riscv.vpairo.mask(
     <vscale x 4 x double> %passthru,
     <vscale x 4 x double> %a1,
     <vscale x 4 x double> %a2,
@@ -1086,7 +1086,7 @@ define <vscale x 8 x double> @test_vpairo_f64m8(<vscale x 8 x double> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, ma
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vpairo.nxv8f64.iXLen(
+  %r = call <vscale x 8 x double> @llvm.riscv.vpairo(
     <vscale x 8 x double> %passthru,
     <vscale x 8 x double> %a1,
     <vscale x 8 x double> %a2,
@@ -1101,7 +1101,7 @@ define <vscale x 8 x double> @test_vpairo_mask_f64m8(<vscale x 8 x double> %pass
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
 ; CHECK-NEXT:    vpairo.vv v8, v16, v24, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vpairo.mask.nxv8f64(
+  %r = call <vscale x 8 x double> @llvm.riscv.vpairo.mask(
     <vscale x 8 x double> %passthru,
     <vscale x 8 x double> %a1,
     <vscale x 8 x double> %a2,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
index 496c21c0676c6..67fe8706914e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipe.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @test_vunzipe_i8mf8(<vscale x 1 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.nxv1i8.nxv2i8.iXLen(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe(
     <vscale x 1 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     iXLen %vl)
@@ -23,7 +23,7 @@ define <vscale x 1 x i8> @test_vunzipe_mask_i8mf8(<vscale x 1 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask.nxv1i8.nxv2i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 1 x i1> %mask,
@@ -37,7 +37,7 @@ define <vscale x 2 x i8> @test_vunzipe_i8mf4(<vscale x 2 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.nxv2i8.nxv4i8.iXLen(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe(
     <vscale x 2 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     iXLen %vl)
@@ -50,7 +50,7 @@ define <vscale x 2 x i8> @test_vunzipe_mask_i8mf4(<vscale x 2 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask.nxv2i8.nxv4i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 2 x i1> %mask,
@@ -64,7 +64,7 @@ define <vscale x 4 x i8> @test_vunzipe_i8mf2(<vscale x 4 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.nxv4i8.nxv8i8.iXLen(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe(
     <vscale x 4 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     iXLen %vl)
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @test_vunzipe_mask_i8mf2(<vscale x 4 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask.nxv4i8.nxv8i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 4 x i1> %mask,
@@ -91,7 +91,7 @@ define <vscale x 8 x i8> @test_vunzipe_i8m1(<vscale x 8 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.nxv8i8.nxv16i8.iXLen(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe(
     <vscale x 8 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     iXLen %vl)
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @test_vunzipe_mask_i8m1(<vscale x 8 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask.nxv8i8.nxv16i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 8 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 8 x i1> %mask,
@@ -118,7 +118,7 @@ define <vscale x 16 x i8> @test_vunzipe_i8m2(<vscale x 16 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.nxv16i8.nxv32i8.iXLen(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe(
     <vscale x 16 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     iXLen %vl)
@@ -131,7 +131,7 @@ define <vscale x 16 x i8> @test_vunzipe_mask_i8m2(<vscale x 16 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask.nxv16i8.nxv32i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 16 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 16 x i1> %mask,
@@ -145,7 +145,7 @@ define <vscale x 32 x i8> @test_vunzipe_i8m4(<vscale x 32 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.nxv32i8.nxv64i8.iXLen(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe(
     <vscale x 32 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     iXLen %vl)
@@ -158,7 +158,7 @@ define <vscale x 32 x i8> @test_vunzipe_mask_i8m4(<vscale x 32 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask.nxv32i8.nxv64i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipe.mask(
     <vscale x 32 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 32 x i1> %mask,
@@ -172,7 +172,7 @@ define <vscale x 1 x i16> @test_vunzipe_i16mf4(<vscale x 1 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.nxv1i16.nxv2i16.iXLen(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe(
     <vscale x 1 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     iXLen %vl)
@@ -185,7 +185,7 @@ define <vscale x 1 x i16> @test_vunzipe_mask_i16mf4(<vscale x 1 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask.nxv1i16.nxv2i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 1 x i1> %mask,
@@ -199,7 +199,7 @@ define <vscale x 2 x i16> @test_vunzipe_i16mf2(<vscale x 2 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.nxv2i16.nxv4i16.iXLen(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe(
     <vscale x 2 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     iXLen %vl)
@@ -212,7 +212,7 @@ define <vscale x 2 x i16> @test_vunzipe_mask_i16mf2(<vscale x 2 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask.nxv2i16.nxv4i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 2 x i1> %mask,
@@ -226,7 +226,7 @@ define <vscale x 4 x i16> @test_vunzipe_i16m1(<vscale x 4 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.nxv4i16.nxv8i16.iXLen(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe(
     <vscale x 4 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     iXLen %vl)
@@ -239,7 +239,7 @@ define <vscale x 4 x i16> @test_vunzipe_mask_i16m1(<vscale x 4 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask.nxv4i16.nxv8i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 4 x i1> %mask,
@@ -253,7 +253,7 @@ define <vscale x 8 x i16> @test_vunzipe_i16m2(<vscale x 8 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.nxv8i16.nxv16i16.iXLen(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe(
     <vscale x 8 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     iXLen %vl)
@@ -266,7 +266,7 @@ define <vscale x 8 x i16> @test_vunzipe_mask_i16m2(<vscale x 8 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask.nxv8i16.nxv16i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipe.mask(
     <vscale x 8 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 8 x i1> %mask,
@@ -280,7 +280,7 @@ define <vscale x 16 x i16> @test_vunzipe_i16m4(<vscale x 16 x i16> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.nxv16i16.nxv32i16.iXLen(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe(
     <vscale x 16 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     iXLen %vl)
@@ -293,7 +293,7 @@ define <vscale x 16 x i16> @test_vunzipe_mask_i16m4(<vscale x 16 x i16> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask.nxv16i16.nxv32i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipe.mask(
     <vscale x 16 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 16 x i1> %mask,
@@ -307,7 +307,7 @@ define <vscale x 1 x i32> @test_vunzipe_i32mf2(<vscale x 1 x i32> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.nxv1i32.nxv2i32.iXLen(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe(
     <vscale x 1 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     iXLen %vl)
@@ -320,7 +320,7 @@ define <vscale x 1 x i32> @test_vunzipe_mask_i32mf2(<vscale x 1 x i32> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask.nxv1i32.nxv2i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 1 x i1> %mask,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @test_vunzipe_i32m1(<vscale x 2 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.nxv2i32.nxv4i32.iXLen(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe(
     <vscale x 2 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     iXLen %vl)
@@ -347,7 +347,7 @@ define <vscale x 2 x i32> @test_vunzipe_mask_i32m1(<vscale x 2 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask.nxv2i32.nxv4i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 2 x i1> %mask,
@@ -361,7 +361,7 @@ define <vscale x 4 x i32> @test_vunzipe_i32m2(<vscale x 4 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.nxv4i32.nxv8i32.iXLen(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe(
     <vscale x 4 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     iXLen %vl)
@@ -374,7 +374,7 @@ define <vscale x 4 x i32> @test_vunzipe_mask_i32m2(<vscale x 4 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask.nxv4i32.nxv8i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 4 x i1> %mask,
@@ -388,7 +388,7 @@ define <vscale x 8 x i32> @test_vunzipe_i32m4(<vscale x 8 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.nxv8i32.nxv16i32.iXLen(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe(
     <vscale x 8 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     iXLen %vl)
@@ -401,7 +401,7 @@ define <vscale x 8 x i32> @test_vunzipe_mask_i32m4(<vscale x 8 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask.nxv8i32.nxv16i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipe.mask(
     <vscale x 8 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 8 x i1> %mask,
@@ -415,7 +415,7 @@ define <vscale x 1 x i64> @test_vunzipe_i64m1(<vscale x 1 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.nxv1i64.nxv2i64.iXLen(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe(
     <vscale x 1 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     iXLen %vl)
@@ -428,7 +428,7 @@ define <vscale x 1 x i64> @test_vunzipe_mask_i64m1(<vscale x 1 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask.nxv1i64.nxv2i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 1 x i1> %mask,
@@ -442,7 +442,7 @@ define <vscale x 2 x i64> @test_vunzipe_i64m2(<vscale x 2 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.nxv2i64.nxv4i64.iXLen(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe(
     <vscale x 2 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     iXLen %vl)
@@ -455,7 +455,7 @@ define <vscale x 2 x i64> @test_vunzipe_mask_i64m2(<vscale x 2 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask.nxv2i64.nxv4i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 2 x i1> %mask,
@@ -469,7 +469,7 @@ define <vscale x 4 x i64> @test_vunzipe_i64m4(<vscale x 4 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.nxv4i64.nxv8i64.iXLen(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe(
     <vscale x 4 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     iXLen %vl)
@@ -482,7 +482,7 @@ define <vscale x 4 x i64> @test_vunzipe_mask_i64m4(<vscale x 4 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask.nxv4i64.nxv8i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 4 x i1> %mask,
@@ -499,7 +499,7 @@ define <vscale x 1 x half> @test_vunzipe_f16mf4(<vscale x 1 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.nxv1f16.nxv2f16.iXLen(
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe(
     <vscale x 1 x half> %passthru,
     <vscale x 2 x half> %src,
     iXLen %vl)
@@ -512,7 +512,7 @@ define <vscale x 1 x half> @test_vunzipe_mask_f16mf4(<vscale x 1 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask.nxv1f16.nxv2f16(
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x half> %passthru,
     <vscale x 2 x half> %src,
     <vscale x 1 x i1> %m,
@@ -526,7 +526,7 @@ define <vscale x 2 x half> @test_vunzipe_f16mf2(<vscale x 2 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.nxv2f16.nxv4f16.iXLen(
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe(
     <vscale x 2 x half> %passthru,
     <vscale x 4 x half> %src,
     iXLen %vl)
@@ -539,7 +539,7 @@ define <vscale x 2 x half> @test_vunzipe_mask_f16mf2(<vscale x 2 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask.nxv2f16.nxv4f16(
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x half> %passthru,
     <vscale x 4 x half> %src,
     <vscale x 2 x i1> %m,
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @test_vunzipe_f16m1(<vscale x 4 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.nxv4f16.nxv8f16.iXLen(
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe(
     <vscale x 4 x half> %passthru,
     <vscale x 8 x half> %src,
     iXLen %vl)
@@ -566,7 +566,7 @@ define <vscale x 4 x half> @test_vunzipe_mask_f16m1(<vscale x 4 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask.nxv4f16.nxv8f16(
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x half> %passthru,
     <vscale x 8 x half> %src,
     <vscale x 4 x i1> %m,
@@ -580,7 +580,7 @@ define <vscale x 8 x half> @test_vunzipe_f16m2(<vscale x 8 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.nxv8f16.nxv16f16.iXLen(
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe(
     <vscale x 8 x half> %passthru,
     <vscale x 16 x half> %src,
     iXLen %vl)
@@ -593,7 +593,7 @@ define <vscale x 8 x half> @test_vunzipe_mask_f16m2(<vscale x 8 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask.nxv8f16.nxv16f16(
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipe.mask(
     <vscale x 8 x half> %passthru,
     <vscale x 16 x half> %src,
     <vscale x 8 x i1> %m,
@@ -607,7 +607,7 @@ define <vscale x 16 x half> @test_vunzipe_f16m4(<vscale x 16 x half> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.nxv16f16.nxv32f16.iXLen(
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe(
     <vscale x 16 x half> %passthru,
     <vscale x 32 x half> %src,
     iXLen %vl)
@@ -620,7 +620,7 @@ define <vscale x 16 x half> @test_vunzipe_mask_f16m4(<vscale x 16 x half> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask.nxv16f16.nxv32f16(
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipe.mask(
     <vscale x 16 x half> %passthru,
     <vscale x 32 x half> %src,
     <vscale x 16 x i1> %m,
@@ -635,7 +635,7 @@ define <vscale x 1 x float> @test_vunzipe_f32mf2(<vscale x 1 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.nxv1f32.nxv2f32.iXLen(
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe(
     <vscale x 1 x float> %passthru,
     <vscale x 2 x float> %src,
     iXLen %vl)
@@ -648,7 +648,7 @@ define <vscale x 1 x float> @test_vunzipe_mask_f32mf2(<vscale x 1 x float> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask.nxv1f32.nxv2f32(
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x float> %passthru,
     <vscale x 2 x float> %src,
     <vscale x 1 x i1> %m,
@@ -662,7 +662,7 @@ define <vscale x 2 x float> @test_vunzipe_f32m1(<vscale x 2 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.nxv2f32.nxv4f32.iXLen(
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe(
     <vscale x 2 x float> %passthru,
     <vscale x 4 x float> %src,
     iXLen %vl)
@@ -675,7 +675,7 @@ define <vscale x 2 x float> @test_vunzipe_mask_f32m1(<vscale x 2 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask.nxv2f32.nxv4f32(
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x float> %passthru,
     <vscale x 4 x float> %src,
     <vscale x 2 x i1> %m,
@@ -689,7 +689,7 @@ define <vscale x 4 x float> @test_vunzipe_f32m2(<vscale x 4 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.nxv4f32.nxv8f32.iXLen(
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe(
     <vscale x 4 x float> %passthru,
     <vscale x 8 x float> %src,
     iXLen %vl)
@@ -702,7 +702,7 @@ define <vscale x 4 x float> @test_vunzipe_mask_f32m2(<vscale x 4 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask.nxv4f32.nxv8f32(
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x float> %passthru,
     <vscale x 8 x float> %src,
     <vscale x 4 x i1> %m,
@@ -716,7 +716,7 @@ define <vscale x 8 x float> @test_vunzipe_f32m4(<vscale x 8 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.nxv8f32.nxv16f32.iXLen(
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe(
     <vscale x 8 x float> %passthru,
     <vscale x 16 x float> %src,
     iXLen %vl)
@@ -729,7 +729,7 @@ define <vscale x 8 x float> @test_vunzipe_mask_f32m4(<vscale x 8 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask.nxv8f32.nxv16f32(
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipe.mask(
     <vscale x 8 x float> %passthru,
     <vscale x 16 x float> %src,
     <vscale x 8 x i1> %m,
@@ -744,7 +744,7 @@ define <vscale x 1 x double> @test_vunzipe_f64m1(<vscale x 1 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.nxv1f64.nxv2f64.iXLen(
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe(
     <vscale x 1 x double> %passthru,
     <vscale x 2 x double> %src,
     iXLen %vl)
@@ -757,7 +757,7 @@ define <vscale x 1 x double> @test_vunzipe_mask_f64m1(<vscale x 1 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask.nxv1f64.nxv2f64(
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipe.mask(
     <vscale x 1 x double> %passthru,
     <vscale x 2 x double> %src,
     <vscale x 1 x i1> %m,
@@ -771,7 +771,7 @@ define <vscale x 2 x double> @test_vunzipe_f64m2(<vscale x 2 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.nxv2f64.nxv4f64.iXLen(
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe(
     <vscale x 2 x double> %passthru,
     <vscale x 4 x double> %src,
     iXLen %vl)
@@ -784,7 +784,7 @@ define <vscale x 2 x double> @test_vunzipe_mask_f64m2(<vscale x 2 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask.nxv2f64.nxv4f64(
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipe.mask(
     <vscale x 2 x double> %passthru,
     <vscale x 4 x double> %src,
     <vscale x 2 x i1> %m,
@@ -798,7 +798,7 @@ define <vscale x 4 x double> @test_vunzipe_f64m4(<vscale x 4 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipe.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.nxv4f64.nxv8f64.iXLen(
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe(
     <vscale x 4 x double> %passthru,
     <vscale x 8 x double> %src,
     iXLen %vl)
@@ -811,7 +811,7 @@ define <vscale x 4 x double> @test_vunzipe_mask_f64m4(<vscale x 4 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vunzipe.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask.nxv4f64.nxv8f64(
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipe.mask(
     <vscale x 4 x double> %passthru,
     <vscale x 8 x double> %src,
     <vscale x 4 x i1> %m,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
index 4d3aa18304cdc..fb595b40486f8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vunzipo.ll
@@ -10,7 +10,7 @@ define <vscale x 1 x i8> @test_vunzipo_i8mf8(<vscale x 1 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.nxv1i8.nxv2i8.iXLen(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo(
     <vscale x 1 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     iXLen %vl)
@@ -23,7 +23,7 @@ define <vscale x 1 x i8> @test_vunzipo_mask_i8mf8(<vscale x 1 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask.nxv1i8.nxv2i8(
+  %a = call <vscale x 1 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 1 x i1> %mask,
@@ -37,7 +37,7 @@ define <vscale x 2 x i8> @test_vunzipo_i8mf4(<vscale x 2 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.nxv2i8.nxv4i8.iXLen(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo(
     <vscale x 2 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     iXLen %vl)
@@ -50,7 +50,7 @@ define <vscale x 2 x i8> @test_vunzipo_mask_i8mf4(<vscale x 2 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask.nxv2i8.nxv4i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 2 x i1> %mask,
@@ -64,7 +64,7 @@ define <vscale x 4 x i8> @test_vunzipo_i8mf2(<vscale x 4 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.nxv4i8.nxv8i8.iXLen(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo(
     <vscale x 4 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     iXLen %vl)
@@ -77,7 +77,7 @@ define <vscale x 4 x i8> @test_vunzipo_mask_i8mf2(<vscale x 4 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask.nxv4i8.nxv8i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 4 x i1> %mask,
@@ -91,7 +91,7 @@ define <vscale x 8 x i8> @test_vunzipo_i8m1(<vscale x 8 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.nxv8i8.nxv16i8.iXLen(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo(
     <vscale x 8 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     iXLen %vl)
@@ -104,7 +104,7 @@ define <vscale x 8 x i8> @test_vunzipo_mask_i8m1(<vscale x 8 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask.nxv8i8.nxv16i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 8 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 8 x i1> %mask,
@@ -118,7 +118,7 @@ define <vscale x 16 x i8> @test_vunzipo_i8m2(<vscale x 16 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.nxv16i8.nxv32i8.iXLen(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo(
     <vscale x 16 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     iXLen %vl)
@@ -131,7 +131,7 @@ define <vscale x 16 x i8> @test_vunzipo_mask_i8m2(<vscale x 16 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask.nxv16i8.nxv32i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 16 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 16 x i1> %mask,
@@ -145,7 +145,7 @@ define <vscale x 32 x i8> @test_vunzipo_i8m4(<vscale x 32 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.nxv32i8.nxv64i8.iXLen(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo(
     <vscale x 32 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     iXLen %vl)
@@ -158,7 +158,7 @@ define <vscale x 32 x i8> @test_vunzipo_mask_i8m4(<vscale x 32 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask.nxv32i8.nxv64i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vunzipo.mask(
     <vscale x 32 x i8> %passthru,
     <vscale x 64 x i8> %arg1,
     <vscale x 32 x i1> %mask,
@@ -172,7 +172,7 @@ define <vscale x 1 x i16> @test_vunzipo_i16mf4(<vscale x 1 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.nxv1i16.nxv2i16.iXLen(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo(
     <vscale x 1 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     iXLen %vl)
@@ -185,7 +185,7 @@ define <vscale x 1 x i16> @test_vunzipo_mask_i16mf4(<vscale x 1 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask.nxv1i16.nxv2i16(
+  %a = call <vscale x 1 x i16> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 1 x i1> %mask,
@@ -199,7 +199,7 @@ define <vscale x 2 x i16> @test_vunzipo_i16mf2(<vscale x 2 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.nxv2i16.nxv4i16.iXLen(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo(
     <vscale x 2 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     iXLen %vl)
@@ -212,7 +212,7 @@ define <vscale x 2 x i16> @test_vunzipo_mask_i16mf2(<vscale x 2 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask.nxv2i16.nxv4i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 2 x i1> %mask,
@@ -226,7 +226,7 @@ define <vscale x 4 x i16> @test_vunzipo_i16m1(<vscale x 4 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.nxv4i16.nxv8i16.iXLen(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo(
     <vscale x 4 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     iXLen %vl)
@@ -239,7 +239,7 @@ define <vscale x 4 x i16> @test_vunzipo_mask_i16m1(<vscale x 4 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask.nxv4i16.nxv8i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 4 x i1> %mask,
@@ -253,7 +253,7 @@ define <vscale x 8 x i16> @test_vunzipo_i16m2(<vscale x 8 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.nxv8i16.nxv16i16.iXLen(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo(
     <vscale x 8 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     iXLen %vl)
@@ -266,7 +266,7 @@ define <vscale x 8 x i16> @test_vunzipo_mask_i16m2(<vscale x 8 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask.nxv8i16.nxv16i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vunzipo.mask(
     <vscale x 8 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 8 x i1> %mask,
@@ -280,7 +280,7 @@ define <vscale x 16 x i16> @test_vunzipo_i16m4(<vscale x 16 x i16> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.nxv16i16.nxv32i16.iXLen(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo(
     <vscale x 16 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     iXLen %vl)
@@ -293,7 +293,7 @@ define <vscale x 16 x i16> @test_vunzipo_mask_i16m4(<vscale x 16 x i16> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask.nxv16i16.nxv32i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vunzipo.mask(
     <vscale x 16 x i16> %passthru,
     <vscale x 32 x i16> %arg1,
     <vscale x 16 x i1> %mask,
@@ -307,7 +307,7 @@ define <vscale x 1 x i32> @test_vunzipo_i32mf2(<vscale x 1 x i32> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.nxv1i32.nxv2i32.iXLen(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo(
     <vscale x 1 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     iXLen %vl)
@@ -320,7 +320,7 @@ define <vscale x 1 x i32> @test_vunzipo_mask_i32mf2(<vscale x 1 x i32> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask.nxv1i32.nxv2i32(
+  %a = call <vscale x 1 x i32> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 1 x i1> %mask,
@@ -334,7 +334,7 @@ define <vscale x 2 x i32> @test_vunzipo_i32m1(<vscale x 2 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.nxv2i32.nxv4i32.iXLen(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo(
     <vscale x 2 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     iXLen %vl)
@@ -347,7 +347,7 @@ define <vscale x 2 x i32> @test_vunzipo_mask_i32m1(<vscale x 2 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask.nxv2i32.nxv4i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 2 x i1> %mask,
@@ -361,7 +361,7 @@ define <vscale x 4 x i32> @test_vunzipo_i32m2(<vscale x 4 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.nxv4i32.nxv8i32.iXLen(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo(
     <vscale x 4 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     iXLen %vl)
@@ -374,7 +374,7 @@ define <vscale x 4 x i32> @test_vunzipo_mask_i32m2(<vscale x 4 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask.nxv4i32.nxv8i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 4 x i1> %mask,
@@ -388,7 +388,7 @@ define <vscale x 8 x i32> @test_vunzipo_i32m4(<vscale x 8 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.nxv8i32.nxv16i32.iXLen(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo(
     <vscale x 8 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     iXLen %vl)
@@ -401,7 +401,7 @@ define <vscale x 8 x i32> @test_vunzipo_mask_i32m4(<vscale x 8 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask.nxv8i32.nxv16i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vunzipo.mask(
     <vscale x 8 x i32> %passthru,
     <vscale x 16 x i32> %arg1,
     <vscale x 8 x i1> %mask,
@@ -415,7 +415,7 @@ define <vscale x 1 x i64> @test_vunzipo_i64m1(<vscale x 1 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.nxv1i64.nxv2i64.iXLen(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo(
     <vscale x 1 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     iXLen %vl)
@@ -428,7 +428,7 @@ define <vscale x 1 x i64> @test_vunzipo_mask_i64m1(<vscale x 1 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask.nxv1i64.nxv2i64(
+  %a = call <vscale x 1 x i64> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 1 x i1> %mask,
@@ -442,7 +442,7 @@ define <vscale x 2 x i64> @test_vunzipo_i64m2(<vscale x 2 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.nxv2i64.nxv4i64.iXLen(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo(
     <vscale x 2 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     iXLen %vl)
@@ -455,7 +455,7 @@ define <vscale x 2 x i64> @test_vunzipo_mask_i64m2(<vscale x 2 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask.nxv2i64.nxv4i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 2 x i1> %mask,
@@ -469,7 +469,7 @@ define <vscale x 4 x i64> @test_vunzipo_i64m4(<vscale x 4 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.nxv4i64.nxv8i64.iXLen(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo(
     <vscale x 4 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     iXLen %vl)
@@ -482,7 +482,7 @@ define <vscale x 4 x i64> @test_vunzipo_mask_i64m4(<vscale x 4 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask.nxv4i64.nxv8i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x i64> %passthru,
     <vscale x 8 x i64> %arg1,
     <vscale x 4 x i1> %mask,
@@ -499,7 +499,7 @@ define <vscale x 1 x half> @test_vunzipo_f16mf4(<vscale x 1 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.nxv1f16.nxv2f16.iXLen(
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo(
     <vscale x 1 x half> %passthru,
     <vscale x 2 x half> %src,
     iXLen %vl)
@@ -512,7 +512,7 @@ define <vscale x 1 x half> @test_vunzipo_mask_f16mf4(<vscale x 1 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask.nxv1f16.nxv2f16(
+  %r = call <vscale x 1 x half> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x half> %passthru,
     <vscale x 2 x half> %src,
     <vscale x 1 x i1> %m,
@@ -526,7 +526,7 @@ define <vscale x 2 x half> @test_vunzipo_f16mf2(<vscale x 2 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.nxv2f16.nxv4f16.iXLen(
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo(
     <vscale x 2 x half> %passthru,
     <vscale x 4 x half> %src,
     iXLen %vl)
@@ -539,7 +539,7 @@ define <vscale x 2 x half> @test_vunzipo_mask_f16mf2(<vscale x 2 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask.nxv2f16.nxv4f16(
+  %r = call <vscale x 2 x half> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x half> %passthru,
     <vscale x 4 x half> %src,
     <vscale x 2 x i1> %m,
@@ -553,7 +553,7 @@ define <vscale x 4 x half> @test_vunzipo_f16m1(<vscale x 4 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.nxv4f16.nxv8f16.iXLen(
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo(
     <vscale x 4 x half> %passthru,
     <vscale x 8 x half> %src,
     iXLen %vl)
@@ -566,7 +566,7 @@ define <vscale x 4 x half> @test_vunzipo_mask_f16m1(<vscale x 4 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask.nxv4f16.nxv8f16(
+  %r = call <vscale x 4 x half> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x half> %passthru,
     <vscale x 8 x half> %src,
     <vscale x 4 x i1> %m,
@@ -580,7 +580,7 @@ define <vscale x 8 x half> @test_vunzipo_f16m2(<vscale x 8 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.nxv8f16.nxv16f16.iXLen(
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo(
     <vscale x 8 x half> %passthru,
     <vscale x 16 x half> %src,
     iXLen %vl)
@@ -593,7 +593,7 @@ define <vscale x 8 x half> @test_vunzipo_mask_f16m2(<vscale x 8 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask.nxv8f16.nxv16f16(
+  %r = call <vscale x 8 x half> @llvm.riscv.vunzipo.mask(
     <vscale x 8 x half> %passthru,
     <vscale x 16 x half> %src,
     <vscale x 8 x i1> %m,
@@ -607,7 +607,7 @@ define <vscale x 16 x half> @test_vunzipo_f16m4(<vscale x 16 x half> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.nxv16f16.nxv32f16.iXLen(
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo(
     <vscale x 16 x half> %passthru,
     <vscale x 32 x half> %src,
     iXLen %vl)
@@ -620,7 +620,7 @@ define <vscale x 16 x half> @test_vunzipo_mask_f16m4(<vscale x 16 x half> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask.nxv16f16.nxv32f16(
+  %r = call <vscale x 16 x half> @llvm.riscv.vunzipo.mask(
     <vscale x 16 x half> %passthru,
     <vscale x 32 x half> %src,
     <vscale x 16 x i1> %m,
@@ -635,7 +635,7 @@ define <vscale x 1 x float> @test_vunzipo_f32mf2(<vscale x 1 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v9
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.nxv1f32.nxv2f32.iXLen(
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo(
     <vscale x 1 x float> %passthru,
     <vscale x 2 x float> %src,
     iXLen %vl)
@@ -648,7 +648,7 @@ define <vscale x 1 x float> @test_vunzipo_mask_f32mf2(<vscale x 1 x float> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v9, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask.nxv1f32.nxv2f32(
+  %r = call <vscale x 1 x float> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x float> %passthru,
     <vscale x 2 x float> %src,
     <vscale x 1 x i1> %m,
@@ -662,7 +662,7 @@ define <vscale x 2 x float> @test_vunzipo_f32m1(<vscale x 2 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.nxv2f32.nxv4f32.iXLen(
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo(
     <vscale x 2 x float> %passthru,
     <vscale x 4 x float> %src,
     iXLen %vl)
@@ -675,7 +675,7 @@ define <vscale x 2 x float> @test_vunzipo_mask_f32m1(<vscale x 2 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask.nxv2f32.nxv4f32(
+  %r = call <vscale x 2 x float> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x float> %passthru,
     <vscale x 4 x float> %src,
     <vscale x 2 x i1> %m,
@@ -689,7 +689,7 @@ define <vscale x 4 x float> @test_vunzipo_f32m2(<vscale x 4 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.nxv4f32.nxv8f32.iXLen(
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo(
     <vscale x 4 x float> %passthru,
     <vscale x 8 x float> %src,
     iXLen %vl)
@@ -702,7 +702,7 @@ define <vscale x 4 x float> @test_vunzipo_mask_f32m2(<vscale x 4 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask.nxv4f32.nxv8f32(
+  %r = call <vscale x 4 x float> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x float> %passthru,
     <vscale x 8 x float> %src,
     <vscale x 4 x i1> %m,
@@ -716,7 +716,7 @@ define <vscale x 8 x float> @test_vunzipo_f32m4(<vscale x 8 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.nxv8f32.nxv16f32.iXLen(
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo(
     <vscale x 8 x float> %passthru,
     <vscale x 16 x float> %src,
     iXLen %vl)
@@ -729,7 +729,7 @@ define <vscale x 8 x float> @test_vunzipo_mask_f32m4(<vscale x 8 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask.nxv8f32.nxv16f32(
+  %r = call <vscale x 8 x float> @llvm.riscv.vunzipo.mask(
     <vscale x 8 x float> %passthru,
     <vscale x 16 x float> %src,
     <vscale x 8 x i1> %m,
@@ -744,7 +744,7 @@ define <vscale x 1 x double> @test_vunzipo_f64m1(<vscale x 1 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.nxv1f64.nxv2f64.iXLen(
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo(
     <vscale x 1 x double> %passthru,
     <vscale x 2 x double> %src,
     iXLen %vl)
@@ -757,7 +757,7 @@ define <vscale x 1 x double> @test_vunzipo_mask_f64m1(<vscale x 1 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask.nxv1f64.nxv2f64(
+  %r = call <vscale x 1 x double> @llvm.riscv.vunzipo.mask(
     <vscale x 1 x double> %passthru,
     <vscale x 2 x double> %src,
     <vscale x 1 x i1> %m,
@@ -771,7 +771,7 @@ define <vscale x 2 x double> @test_vunzipo_f64m2(<vscale x 2 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v12
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.nxv2f64.nxv4f64.iXLen(
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo(
     <vscale x 2 x double> %passthru,
     <vscale x 4 x double> %src,
     iXLen %vl)
@@ -784,7 +784,7 @@ define <vscale x 2 x double> @test_vunzipo_mask_f64m2(<vscale x 2 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v12, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask.nxv2f64.nxv4f64(
+  %r = call <vscale x 2 x double> @llvm.riscv.vunzipo.mask(
     <vscale x 2 x double> %passthru,
     <vscale x 4 x double> %src,
     <vscale x 2 x i1> %m,
@@ -798,7 +798,7 @@ define <vscale x 4 x double> @test_vunzipo_f64m4(<vscale x 4 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vunzipo.v v8, v16
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.nxv4f64.nxv8f64.iXLen(
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo(
     <vscale x 4 x double> %passthru,
     <vscale x 8 x double> %src,
     iXLen %vl)
@@ -811,7 +811,7 @@ define <vscale x 4 x double> @test_vunzipo_mask_f64m4(<vscale x 4 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vunzipo.v v8, v16, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask.nxv4f64.nxv8f64(
+  %r = call <vscale x 4 x double> @llvm.riscv.vunzipo.mask(
     <vscale x 4 x double> %passthru,
     <vscale x 8 x double> %src,
     <vscale x 4 x i1> %m,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vzip.ll b/llvm/test/CodeGen/RISCV/rvv/vzip.ll
index afba044383abc..d7d61c79e79d8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vzip.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vzip.ll
@@ -10,7 +10,7 @@ define <vscale x 2 x i8> @test_vzip_vv_i8mf4(<vscale x 2 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vzip.nxv2i8.nxv1i8.iXLen(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vzip(
     <vscale x 2 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -24,7 +24,7 @@ define <vscale x 2 x i8> @test_vzip_vv_mask_i8mf4(<vscale x 2 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i8> @llvm.riscv.vzip.mask.nxv2i8.nxv1i8(
+  %a = call <vscale x 2 x i8> @llvm.riscv.vzip.mask(
     <vscale x 2 x i8> %passthru,
     <vscale x 1 x i8> %arg1,
     <vscale x 1 x i8> %arg2,
@@ -39,7 +39,7 @@ define <vscale x 4 x i8> @test_vzip_vv_i8mf2(<vscale x 4 x i8> %passthru, <vscal
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vzip.nxv4i8.nxv2i8.iXLen(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vzip(
     <vscale x 4 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -53,7 +53,7 @@ define <vscale x 4 x i8> @test_vzip_vv_mask_i8mf2(<vscale x 4 x i8> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i8> @llvm.riscv.vzip.mask.nxv4i8.nxv2i8(
+  %a = call <vscale x 4 x i8> @llvm.riscv.vzip.mask(
     <vscale x 4 x i8> %passthru,
     <vscale x 2 x i8> %arg1,
     <vscale x 2 x i8> %arg2,
@@ -68,7 +68,7 @@ define <vscale x 8 x i8> @test_vzip_vv_i8m1(<vscale x 8 x i8> %passthru, <vscale
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vzip.nxv8i8.nxv4i8.iXLen(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vzip(
     <vscale x 8 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -82,7 +82,7 @@ define <vscale x 8 x i8> @test_vzip_vv_mask_i8m1(<vscale x 8 x i8> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i8> @llvm.riscv.vzip.mask.nxv8i8.nxv4i8(
+  %a = call <vscale x 8 x i8> @llvm.riscv.vzip.mask(
     <vscale x 8 x i8> %passthru,
     <vscale x 4 x i8> %arg1,
     <vscale x 4 x i8> %arg2,
@@ -97,7 +97,7 @@ define <vscale x 16 x i8> @test_vzip_vv_i8m2(<vscale x 16 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vzip.nxv16i8.nxv8i8.iXLen(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vzip(
     <vscale x 16 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -111,7 +111,7 @@ define <vscale x 16 x i8> @test_vzip_vv_mask_i8m2(<vscale x 16 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i8> @llvm.riscv.vzip.mask.nxv16i8.nxv8i8(
+  %a = call <vscale x 16 x i8> @llvm.riscv.vzip.mask(
     <vscale x 16 x i8> %passthru,
     <vscale x 8 x i8> %arg1,
     <vscale x 8 x i8> %arg2,
@@ -126,7 +126,7 @@ define <vscale x 32 x i8> @test_vzip_vv_i8m4(<vscale x 32 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vzip.nxv32i8.nxv16i8.iXLen(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vzip(
     <vscale x 32 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -140,7 +140,7 @@ define <vscale x 32 x i8> @test_vzip_vv_mask_i8m4(<vscale x 32 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i8> @llvm.riscv.vzip.mask.nxv32i8.nxv16i8(
+  %a = call <vscale x 32 x i8> @llvm.riscv.vzip.mask(
     <vscale x 32 x i8> %passthru,
     <vscale x 16 x i8> %arg1,
     <vscale x 16 x i8> %arg2,
@@ -155,7 +155,7 @@ define <vscale x 64 x i8> @test_vzip_vv_i8m8(<vscale x 64 x i8> %passthru, <vsca
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vzip.nxv64i8.nxv32i8.iXLen(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vzip(
     <vscale x 64 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -169,7 +169,7 @@ define <vscale x 64 x i8> @test_vzip_vv_mask_i8m8(<vscale x 64 x i8> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 64 x i8> @llvm.riscv.vzip.mask.nxv64i8.nxv32i8(
+  %a = call <vscale x 64 x i8> @llvm.riscv.vzip.mask(
     <vscale x 64 x i8> %passthru,
     <vscale x 32 x i8> %arg1,
     <vscale x 32 x i8> %arg2,
@@ -184,7 +184,7 @@ define <vscale x 2 x i16> @test_vzip_vv_i16mf2(<vscale x 2 x i16> %passthru, <vs
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vzip.nxv2i16.nxv1i16.iXLen(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vzip(
     <vscale x 2 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -198,7 +198,7 @@ define <vscale x 2 x i16> @test_vzip_vv_mask_i16mf2(<vscale x 2 x i16> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i16> @llvm.riscv.vzip.mask.nxv2i16.nxv1i16(
+  %a = call <vscale x 2 x i16> @llvm.riscv.vzip.mask(
     <vscale x 2 x i16> %passthru,
     <vscale x 1 x i16> %arg1,
     <vscale x 1 x i16> %arg2,
@@ -213,7 +213,7 @@ define <vscale x 4 x i16> @test_vzip_vv_i16m1(<vscale x 4 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vzip.nxv4i16.nxv2i16.iXLen(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vzip(
     <vscale x 4 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -227,7 +227,7 @@ define <vscale x 4 x i16> @test_vzip_vv_mask_i16m1(<vscale x 4 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i16> @llvm.riscv.vzip.mask.nxv4i16.nxv2i16(
+  %a = call <vscale x 4 x i16> @llvm.riscv.vzip.mask(
     <vscale x 4 x i16> %passthru,
     <vscale x 2 x i16> %arg1,
     <vscale x 2 x i16> %arg2,
@@ -242,7 +242,7 @@ define <vscale x 8 x i16> @test_vzip_vv_i16m2(<vscale x 8 x i16> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vzip.nxv8i16.nxv4i16.iXLen(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vzip(
     <vscale x 8 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -256,7 +256,7 @@ define <vscale x 8 x i16> @test_vzip_vv_mask_i16m2(<vscale x 8 x i16> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i16> @llvm.riscv.vzip.mask.nxv8i16.nxv4i16(
+  %a = call <vscale x 8 x i16> @llvm.riscv.vzip.mask(
     <vscale x 8 x i16> %passthru,
     <vscale x 4 x i16> %arg1,
     <vscale x 4 x i16> %arg2,
@@ -271,7 +271,7 @@ define <vscale x 16 x i16> @test_vzip_vv_i16m4(<vscale x 16 x i16> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vzip.nxv16i16.nxv8i16.iXLen(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vzip(
     <vscale x 16 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -285,7 +285,7 @@ define <vscale x 16 x i16> @test_vzip_vv_mask_i16m4(<vscale x 16 x i16> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i16> @llvm.riscv.vzip.mask.nxv16i16.nxv8i16(
+  %a = call <vscale x 16 x i16> @llvm.riscv.vzip.mask(
     <vscale x 16 x i16> %passthru,
     <vscale x 8 x i16> %arg1,
     <vscale x 8 x i16> %arg2,
@@ -300,7 +300,7 @@ define <vscale x 32 x i16> @test_vzip_vv_i16m8(<vscale x 32 x i16> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vzip.nxv32i16.nxv16i16.iXLen(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vzip(
     <vscale x 32 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -314,7 +314,7 @@ define <vscale x 32 x i16> @test_vzip_vv_mask_i16m8(<vscale x 32 x i16> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 32 x i16> @llvm.riscv.vzip.mask.nxv32i16.nxv16i16(
+  %a = call <vscale x 32 x i16> @llvm.riscv.vzip.mask(
     <vscale x 32 x i16> %passthru,
     <vscale x 16 x i16> %arg1,
     <vscale x 16 x i16> %arg2,
@@ -329,7 +329,7 @@ define <vscale x 2 x i32> @test_vzip_vv_i32m1(<vscale x 2 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzip.nxv2i32.nxv1i32.iXLen(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vzip(
     <vscale x 2 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -343,7 +343,7 @@ define <vscale x 2 x i32> @test_vzip_vv_mask_i32m1(<vscale x 2 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i32> @llvm.riscv.vzip.mask.nxv2i32.nxv1i32(
+  %a = call <vscale x 2 x i32> @llvm.riscv.vzip.mask(
     <vscale x 2 x i32> %passthru,
     <vscale x 1 x i32> %arg1,
     <vscale x 1 x i32> %arg2,
@@ -358,7 +358,7 @@ define <vscale x 4 x i32> @test_vzip_vv_i32m2(<vscale x 4 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzip.nxv4i32.nxv2i32.iXLen(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vzip(
     <vscale x 4 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -372,7 +372,7 @@ define <vscale x 4 x i32> @test_vzip_vv_mask_i32m2(<vscale x 4 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i32> @llvm.riscv.vzip.mask.nxv4i32.nxv2i32(
+  %a = call <vscale x 4 x i32> @llvm.riscv.vzip.mask(
     <vscale x 4 x i32> %passthru,
     <vscale x 2 x i32> %arg1,
     <vscale x 2 x i32> %arg2,
@@ -387,7 +387,7 @@ define <vscale x 8 x i32> @test_vzip_vv_i32m4(<vscale x 8 x i32> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzip.nxv8i32.nxv4i32.iXLen(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vzip(
     <vscale x 8 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -401,7 +401,7 @@ define <vscale x 8 x i32> @test_vzip_vv_mask_i32m4(<vscale x 8 x i32> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i32> @llvm.riscv.vzip.mask.nxv8i32.nxv4i32(
+  %a = call <vscale x 8 x i32> @llvm.riscv.vzip.mask(
     <vscale x 8 x i32> %passthru,
     <vscale x 4 x i32> %arg1,
     <vscale x 4 x i32> %arg2,
@@ -416,7 +416,7 @@ define <vscale x 16 x i32> @test_vzip_vv_i32m8(<vscale x 16 x i32> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzip.nxv16i32.nxv8i32.iXLen(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vzip(
     <vscale x 16 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -430,7 +430,7 @@ define <vscale x 16 x i32> @test_vzip_vv_mask_i32m8(<vscale x 16 x i32> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 16 x i32> @llvm.riscv.vzip.mask.nxv16i32.nxv8i32(
+  %a = call <vscale x 16 x i32> @llvm.riscv.vzip.mask(
     <vscale x 16 x i32> %passthru,
     <vscale x 8 x i32> %arg1,
     <vscale x 8 x i32> %arg2,
@@ -445,7 +445,7 @@ define <vscale x 2 x i64> @test_vzip_vv_i64m2(<vscale x 2 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzip.nxv2i64.nxv1i64.iXLen(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vzip(
     <vscale x 2 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -459,7 +459,7 @@ define <vscale x 2 x i64> @test_vzip_vv_mask_i64m2(<vscale x 2 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 2 x i64> @llvm.riscv.vzip.mask.nxv2i64.nxv1i64(
+  %a = call <vscale x 2 x i64> @llvm.riscv.vzip.mask(
     <vscale x 2 x i64> %passthru,
     <vscale x 1 x i64> %arg1,
     <vscale x 1 x i64> %arg2,
@@ -474,7 +474,7 @@ define <vscale x 4 x i64> @test_vzip_vv_i64m4(<vscale x 4 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzip.nxv4i64.nxv2i64.iXLen(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vzip(
     <vscale x 4 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -488,7 +488,7 @@ define <vscale x 4 x i64> @test_vzip_vv_mask_i64m4(<vscale x 4 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 4 x i64> @llvm.riscv.vzip.mask.nxv4i64.nxv2i64(
+  %a = call <vscale x 4 x i64> @llvm.riscv.vzip.mask(
     <vscale x 4 x i64> %passthru,
     <vscale x 2 x i64> %arg1,
     <vscale x 2 x i64> %arg2,
@@ -503,7 +503,7 @@ define <vscale x 8 x i64> @test_vzip_vv_i64m8(<vscale x 8 x i64> %passthru, <vsc
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzip.nxv8i64.nxv4i64.iXLen(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vzip(
     <vscale x 8 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -517,7 +517,7 @@ define <vscale x 8 x i64> @test_vzip_vv_mask_i64m8(<vscale x 8 x i64> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %a = call <vscale x 8 x i64> @llvm.riscv.vzip.mask.nxv8i64.nxv4i64(
+  %a = call <vscale x 8 x i64> @llvm.riscv.vzip.mask(
     <vscale x 8 x i64> %passthru,
     <vscale x 4 x i64> %arg1,
     <vscale x 4 x i64> %arg2,
@@ -535,7 +535,7 @@ define <vscale x 2 x half> @test_vzip_vv_f16mf4(<vscale x 2 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vzip.nxv2f16.nxv1f16.iXLen(
+  %r = call <vscale x 2 x half> @llvm.riscv.vzip(
     <vscale x 2 x half> %passthru,
     <vscale x 1 x half> %a,
     <vscale x 1 x half> %b,
@@ -549,7 +549,7 @@ define <vscale x 2 x half> @test_vzip_vv_mask_f16mf4(<vscale x 2 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x half> @llvm.riscv.vzip.mask.nxv2f16.nxv1f16(
+  %r = call <vscale x 2 x half> @llvm.riscv.vzip.mask(
     <vscale x 2 x half> %passthru,
     <vscale x 1 x half> %a,
     <vscale x 1 x half> %b,
@@ -564,7 +564,7 @@ define <vscale x 4 x half> @test_vzip_vv_f16mf2(<vscale x 4 x half> %passthru, <
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vzip.nxv4f16.nxv2f16.iXLen(
+  %r = call <vscale x 4 x half> @llvm.riscv.vzip(
     <vscale x 4 x half> %passthru,
     <vscale x 2 x half> %a,
     <vscale x 2 x half> %b,
@@ -578,7 +578,7 @@ define <vscale x 4 x half> @test_vzip_vv_mask_f16mf2(<vscale x 4 x half> %passth
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x half> @llvm.riscv.vzip.mask.nxv4f16.nxv2f16(
+  %r = call <vscale x 4 x half> @llvm.riscv.vzip.mask(
     <vscale x 4 x half> %passthru,
     <vscale x 2 x half> %a,
     <vscale x 2 x half> %b,
@@ -593,7 +593,7 @@ define <vscale x 8 x half> @test_vzip_vv_f16m1(<vscale x 8 x half> %passthru, <v
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vzip.nxv8f16.nxv4f16.iXLen(
+  %r = call <vscale x 8 x half> @llvm.riscv.vzip(
     <vscale x 8 x half> %passthru,
     <vscale x 4 x half> %a,
     <vscale x 4 x half> %b,
@@ -607,7 +607,7 @@ define <vscale x 8 x half> @test_vzip_vv_mask_f16m1(<vscale x 8 x half> %passthr
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x half> @llvm.riscv.vzip.mask.nxv8f16.nxv4f16(
+  %r = call <vscale x 8 x half> @llvm.riscv.vzip.mask(
     <vscale x 8 x half> %passthru,
     <vscale x 4 x half> %a,
     <vscale x 4 x half> %b,
@@ -622,7 +622,7 @@ define <vscale x 16 x half> @test_vzip_vv_f16m2(<vscale x 16 x half> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vzip.nxv16f16.nxv8f16.iXLen(
+  %r = call <vscale x 16 x half> @llvm.riscv.vzip(
     <vscale x 16 x half> %passthru,
     <vscale x 8 x half> %a,
     <vscale x 8 x half> %b,
@@ -636,7 +636,7 @@ define <vscale x 16 x half> @test_vzip_vv_mask_f16m2(<vscale x 16 x half> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x half> @llvm.riscv.vzip.mask.nxv16f16.nxv8f16(
+  %r = call <vscale x 16 x half> @llvm.riscv.vzip.mask(
     <vscale x 16 x half> %passthru,
     <vscale x 8 x half> %a,
     <vscale x 8 x half> %b,
@@ -651,7 +651,7 @@ define <vscale x 32 x half> @test_vzip_vv_f16m4(<vscale x 32 x half> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vzip.nxv32f16.nxv16f16.iXLen(
+  %r = call <vscale x 32 x half> @llvm.riscv.vzip(
     <vscale x 32 x half> %passthru,
     <vscale x 16 x half> %a,
     <vscale x 16 x half> %b,
@@ -665,7 +665,7 @@ define <vscale x 32 x half> @test_vzip_vv_mask_f16m4(<vscale x 32 x half> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 32 x half> @llvm.riscv.vzip.mask.nxv32f16.nxv16f16(
+  %r = call <vscale x 32 x half> @llvm.riscv.vzip.mask(
     <vscale x 32 x half> %passthru,
     <vscale x 16 x half> %a,
     <vscale x 16 x half> %b,
@@ -681,7 +681,7 @@ define <vscale x 2 x float> @test_vzip_vv_f32mf2(<vscale x 2 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v9, v10
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vzip.nxv2f32.nxv1f32.iXLen(
+  %r = call <vscale x 2 x float> @llvm.riscv.vzip(
     <vscale x 2 x float> %passthru,
     <vscale x 1 x float> %a,
     <vscale x 1 x float> %b,
@@ -695,7 +695,7 @@ define <vscale x 2 x float> @test_vzip_vv_mask_f32mf2(<vscale x 2 x float> %pass
 ; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v9, v10, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x float> @llvm.riscv.vzip.mask.nxv2f32.nxv1f32(
+  %r = call <vscale x 2 x float> @llvm.riscv.vzip.mask(
     <vscale x 2 x float> %passthru,
     <vscale x 1 x float> %a,
     <vscale x 1 x float> %b,
@@ -710,7 +710,7 @@ define <vscale x 4 x float> @test_vzip_vv_f32m1(<vscale x 4 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vzip.nxv4f32.nxv2f32.iXLen(
+  %r = call <vscale x 4 x float> @llvm.riscv.vzip(
     <vscale x 4 x float> %passthru,
     <vscale x 2 x float> %a,
     <vscale x 2 x float> %b,
@@ -724,7 +724,7 @@ define <vscale x 4 x float> @test_vzip_vv_mask_f32m1(<vscale x 4 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x float> @llvm.riscv.vzip.mask.nxv4f32.nxv2f32(
+  %r = call <vscale x 4 x float> @llvm.riscv.vzip.mask(
     <vscale x 4 x float> %passthru,
     <vscale x 2 x float> %a,
     <vscale x 2 x float> %b,
@@ -739,7 +739,7 @@ define <vscale x 8 x float> @test_vzip_vv_f32m2(<vscale x 8 x float> %passthru,
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vzip.nxv8f32.nxv4f32.iXLen(
+  %r = call <vscale x 8 x float> @llvm.riscv.vzip(
     <vscale x 8 x float> %passthru,
     <vscale x 4 x float> %a,
     <vscale x 4 x float> %b,
@@ -753,7 +753,7 @@ define <vscale x 8 x float> @test_vzip_vv_mask_f32m2(<vscale x 8 x float> %passt
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x float> @llvm.riscv.vzip.mask.nxv8f32.nxv4f32(
+  %r = call <vscale x 8 x float> @llvm.riscv.vzip.mask(
     <vscale x 8 x float> %passthru,
     <vscale x 4 x float> %a,
     <vscale x 4 x float> %b,
@@ -768,7 +768,7 @@ define <vscale x 16 x float> @test_vzip_vv_f32m4(<vscale x 16 x float> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vzip.nxv16f32.nxv8f32.iXLen(
+  %r = call <vscale x 16 x float> @llvm.riscv.vzip(
     <vscale x 16 x float> %passthru,
     <vscale x 8 x float> %a,
     <vscale x 8 x float> %b,
@@ -782,7 +782,7 @@ define <vscale x 16 x float> @test_vzip_vv_mask_f32m4(<vscale x 16 x float> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 16 x float> @llvm.riscv.vzip.mask.nxv16f32.nxv8f32(
+  %r = call <vscale x 16 x float> @llvm.riscv.vzip.mask(
     <vscale x 16 x float> %passthru,
     <vscale x 8 x float> %a,
     <vscale x 8 x float> %b,
@@ -798,7 +798,7 @@ define <vscale x 2 x double> @test_vzip_vv_f64m1(<vscale x 2 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v10, v11
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vzip.nxv2f64.nxv1f64.iXLen(
+  %r = call <vscale x 2 x double> @llvm.riscv.vzip(
     <vscale x 2 x double> %passthru,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
@@ -812,7 +812,7 @@ define <vscale x 2 x double> @test_vzip_vv_mask_f64m1(<vscale x 2 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v10, v11, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 2 x double> @llvm.riscv.vzip.mask.nxv2f64.nxv1f64(
+  %r = call <vscale x 2 x double> @llvm.riscv.vzip.mask(
     <vscale x 2 x double> %passthru,
     <vscale x 1 x double> %a,
     <vscale x 1 x double> %b,
@@ -827,7 +827,7 @@ define <vscale x 4 x double> @test_vzip_vv_f64m2(<vscale x 4 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v12, v14
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vzip.nxv4f64.nxv2f64.iXLen(
+  %r = call <vscale x 4 x double> @llvm.riscv.vzip(
     <vscale x 4 x double> %passthru,
     <vscale x 2 x double> %a,
     <vscale x 2 x double> %b,
@@ -841,7 +841,7 @@ define <vscale x 4 x double> @test_vzip_vv_mask_f64m2(<vscale x 4 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v12, v14, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 4 x double> @llvm.riscv.vzip.mask.nxv4f64.nxv2f64(
+  %r = call <vscale x 4 x double> @llvm.riscv.vzip.mask(
     <vscale x 4 x double> %passthru,
     <vscale x 2 x double> %a,
     <vscale x 2 x double> %b,
@@ -856,7 +856,7 @@ define <vscale x 8 x double> @test_vzip_vv_f64m4(<vscale x 8 x double> %passthru
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, ma
 ; CHECK-NEXT:    vzip.vv v8, v16, v20
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vzip.nxv8f64.nxv4f64.iXLen(
+  %r = call <vscale x 8 x double> @llvm.riscv.vzip(
     <vscale x 8 x double> %passthru,
     <vscale x 4 x double> %a,
     <vscale x 4 x double> %b,
@@ -870,7 +870,7 @@ define <vscale x 8 x double> @test_vzip_vv_mask_f64m4(<vscale x 8 x double> %pas
 ; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
 ; CHECK-NEXT:    vzip.vv v8, v16, v20, v0.t
 ; CHECK-NEXT:    ret
-  %r = call <vscale x 8 x double> @llvm.riscv.vzip.mask.nxv8f64.nxv4f64(
+  %r = call <vscale x 8 x double> @llvm.riscv.vzip.mask(
     <vscale x 8 x double> %passthru,
     <vscale x 4 x double> %a,
     <vscale x 4 x double> %b,

>From 723d028563053e6f17b54e0e20ac48ac5c71a751 Mon Sep 17 00:00:00 2001
From: Pengcheng Wang <wangpengcheng.pp at bytedance.com>
Date: Tue, 31 Mar 2026 11:48:45 +0800
Subject: [PATCH 5/5] Add ExtraPreds and format

---
 llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td | 22 +++++++++++++-------
 1 file changed, 14 insertions(+), 8 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
index 696b8ec01c8d7..30613b2e1be05 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZvzip.td
@@ -84,7 +84,8 @@ multiclass VPseudoVUNZIP {
     defvar mx = m.MX;
     let VLMul = m.value in {
       def "_V_" # mx : VPseudoUnaryNoMask<m.vrclass, m.wvrclass>,
-                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx, forcePassthruRead=true>;
+                       SchedUnary<"WriteVIALUV", "ReadVIALUV", mx,
+                                  forcePassthruRead=true>;
       def "_V_" # mx # "_MASK" :
         VPseudoUnaryMask<m.vrclass, m.wvrclass>,
         RISCVMaskedPseudo<MaskIdx=2>,
@@ -102,11 +103,13 @@ let Predicates = [HasStdExtZvzip],
   defm PseudoVPAIRO  : VPseudoVALU_VV;
 }
 
-multiclass VPatVUNZIPIntrinsic<string intrinsic_name, string instruction_name> {
+multiclass VPatVUnzipIntrinsic<string intrinsic_name,
+                               string instruction_name> {
   foreach VtiToWti = AllZvzipVectors in {
     defvar vti = VtiToWti.Vti;
     defvar wti = VtiToWti.Wti;
-    let Predicates = !listconcat(GetVTypePredicates<wti>.Predicates, [HasStdExtZvzip]) in {
+    let Predicates = !listconcat(GetVTypePredicates<wti>.Predicates,
+                                 [HasStdExtZvzip]) in {
       def : VPatUnaryNoMask<intrinsic_name, instruction_name, "V",
                             vti.Vector, wti.Vector, vti.Log2SEW,
                             vti.LMul, vti.RegClass, wti.RegClass>;
@@ -117,8 +120,11 @@ multiclass VPatVUNZIPIntrinsic<string intrinsic_name, string instruction_name> {
   }
 }
 
-defm : VPatVUNZIPIntrinsic<"int_riscv_vunzipe", "PseudoVUNZIPE">;
-defm : VPatVUNZIPIntrinsic<"int_riscv_vunzipo", "PseudoVUNZIPO">;
-defm : VPatBinaryV_VV<"int_riscv_vpaire", "PseudoVPAIRE", AllVectors>;
-defm : VPatBinaryV_VV<"int_riscv_vpairo", "PseudoVPAIRO", AllVectors>;
-defm : VPatBinaryW_VV<"int_riscv_vzip", "PseudoVZIP", AllZvzipVectors>;
+defm : VPatVUnzipIntrinsic<"int_riscv_vunzipe", "PseudoVUNZIPE">;
+defm : VPatVUnzipIntrinsic<"int_riscv_vunzipo", "PseudoVUNZIPO">;
+defm : VPatBinaryV_VV<"int_riscv_vpaire", "PseudoVPAIRE", AllVectors,
+                      ExtraPreds = [HasStdExtZvzip]>;
+defm : VPatBinaryV_VV<"int_riscv_vpairo", "PseudoVPAIRO", AllVectors,
+                      ExtraPreds = [HasStdExtZvzip]>;
+defm : VPatBinaryW_VV<"int_riscv_vzip", "PseudoVZIP", AllZvzipVectors,
+                      ExtraPreds = [HasStdExtZvzip]>;



More information about the llvm-commits mailing list